diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg deleted file mode 100644 index 0a7eb0d4..00000000 --- a/ansible/ansible.cfg +++ /dev/null @@ -1,5 +0,0 @@ -[defaults] - -# some basic default values... - -inventory = ./hosts \ No newline at end of file diff --git a/ansible/hosts b/ansible/hosts deleted file mode 100644 index fcbb084b..00000000 --- a/ansible/hosts +++ /dev/null @@ -1,21 +0,0 @@ -[hypervisor] -server3.hq.c3d2.de -server5.hq.c3d2.de -server6.hq.c3d2.de -server7.hq.c3d2.de -server8.hq.c3d2.de -server9.hq.c3d2.de - -[hypervisor:vars] -ansible_connection=ssh -ansible_user=root - -[kubernetes] -k8s-1.hq.c3d2.de -k8s-2.hq.c3d2.de -k8s-3.hq.c3d2.de -k8s-4.hq.c3d2.de - -[kubernetes:vars] -ansible_python_interpreter=/usr/bin/python3 -ansible_user=ubuntu diff --git a/ansible/hypervisor.yml b/ansible/hypervisor.yml deleted file mode 100644 index deaf84fa..00000000 --- a/ansible/hypervisor.yml +++ /dev/null @@ -1,57 +0,0 @@ ---- -# file: hypervisor.yml -- hosts: hypervisor - roles: - - proxmox - - { role: "elastic.beats", beat: "filebeat", - become: true, - tags: ["filebeat", "logging"], - beats_version: "7.12.1", - beat_conf: { - filebeat: { - "inputs":[{ - "type": log, - "enabled": true, - "paths": [ - "/var/log/ceph/*.log", - "/var/log/pve/tasks/*/*", - "/var/log/vzdump/*.log", - - ], - tags: ["hypervisor", "proxmox"] - }] - } - }, - "output_conf": { - "logstash": { - "hosts": ["logging.serv.zentralwerk.org:5044", "172.20.73.13:5044"] - } - }, - logging_conf: { - level: warning, - to_files: false - } - } - - { role: "elastic.beats", beat: "journalbeat", - become: true, - tags: ["journalbeat", "logging"], - beats_version: "7.12.1", - beat_conf: { - journalbeat: { - "inputs":[{ - seek: cursor, - "paths": [] - }] - }, - tags: ["hypervisor", "proxmox"] - }, - "output_conf": { - "logstash": { - "hosts": ["logging.serv.zentralwerk.org:5044", "172.20.73.13:5044"] - } - }, - logging_conf: { - level: warning, - to_files: false, - } - } diff --git a/ansible/kubernetes.yml b/ansible/kubernetes.yml deleted file mode 100644 index 45e2a655..00000000 --- a/ansible/kubernetes.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- -# file: hypervisor.yml -- hosts: kubernetes - become: yes - become_user: root - become_method: sudo - vars: - check_mk_agent_over_ssh: False - check_mk_agent_monitoring_host: monit.hq.c3d2.de - check_mk_agent_monitoring_user: monitoring - check_mk_agent_add_to_wato: False - check_mk_agent_monitoring_host_wato_username: monitoring - check_mk_agent_monitoring_host_url: https://monit.hq.c3d2.de/c3d2/ - # check_mk_agent_local_checks: - # filecount: - # src: files/check_mk_local_checks/filecount - roles: - - k8s - - elnappo.check_mk_agent diff --git a/ansible/roles/elastic.beats/.ci/jobs/defaults.yml b/ansible/roles/elastic.beats/.ci/jobs/defaults.yml deleted file mode 100644 index b06e5fde..00000000 --- a/ansible/roles/elastic.beats/.ci/jobs/defaults.yml +++ /dev/null @@ -1,58 +0,0 @@ ---- - -##### GLOBAL METADATA - -- meta: - cluster: devops-ci - -##### JOB DEFAULTS - -- job: - project-type: matrix - logrotate: - daysToKeep: 30 - numToKeep: 100 - parameters: - - string: - name: branch_specifier - default: master - description: the Git branch specifier to build (<branchName>, <tagName>, - <commitId>, etc.) 
- properties: - - github: - url: https://github.com/elastic/ansible-beats/ - - inject: - properties-content: HOME=$JENKINS_HOME - concurrent: true - node: master - scm: - - git: - name: origin - credentials-id: f6c7695a-671e-4f4f-a331-acdce44ff9ba - reference-repo: /var/lib/jenkins/.git-references/ansible-beats.git - branches: - - ${branch_specifier} - url: git@github.com:elastic/ansible-beats.git - basedir: ansible-beats - wipe-workspace: 'False' - axes: - - axis: - type: slave - name: label - values: - - linux - - axis: - name: OS - filename: ansible-beats/test/matrix.yml - type: yaml - - axis: - name: TEST_TYPE - filename: ansible-beats/test/matrix.yml - type: yaml - wrappers: - - ansicolor - - timeout: - type: absolute - timeout: 360 - fail: true - - timestamps diff --git a/ansible/roles/elastic.beats/.ci/jobs/elastic+ansible-beats+master.yml b/ansible/roles/elastic.beats/.ci/jobs/elastic+ansible-beats+master.yml deleted file mode 100644 index 5d18f0f2..00000000 --- a/ansible/roles/elastic.beats/.ci/jobs/elastic+ansible-beats+master.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -- job: - name: elastic+ansible-beats+master - display-name: elastic / ansible-beats - master - description: Master branch testing with test kitchen - triggers: - - timed: H H(02-04) * * * - builders: - - shell: |- - #!/usr/local/bin/runbld - set -euo pipefail - - export RBENV_VERSION='2.5.7' - export PATH="$HOME/.rbenv/bin:$PATH" - eval "$(rbenv init -)" - rbenv local $RBENV_VERSION - - make setup - make verify PATTERN=$TEST_TYPE-$OS - publishers: - - slack: - notify-back-to-normal: True - notify-every-failure: True - room: infra-release-notify - team-domain: elastic - auth-token-id: release-slack-integration-token - auth-token-credential-id: release-slack-integration-token diff --git a/ansible/roles/elastic.beats/.ci/jobs/elastic+ansible-beats+pull-request.yml b/ansible/roles/elastic.beats/.ci/jobs/elastic+ansible-beats+pull-request.yml deleted file mode 100644 index 693e39c8..00000000 --- a/ansible/roles/elastic.beats/.ci/jobs/elastic+ansible-beats+pull-request.yml +++ /dev/null @@ -1,31 +0,0 @@ ---- -- job: - name: elastic+ansible-beats+pull-request - display-name: elastic / ansible-beats - pull-request - description: Pull request testing with test kitchen - parameters: [] - scm: - - git: - branches: - - $ghprbActualCommit - refspec: +refs/pull/*:refs/remotes/origin/pr/* - triggers: - - github-pull-request: - github-hooks: true - org-list: - - elastic - allow-whitelist-orgs-as-admins: true - cancel-builds-on-update: true - status-context: devops-ci - builders: - - shell: |- - #!/usr/local/bin/runbld - set -euo pipefail - - export RBENV_VERSION='2.5.7' - export PATH="$HOME/.rbenv/bin:$PATH" - eval "$(rbenv init -)" - rbenv local $RBENV_VERSION - - make setup - make verify PATTERN=$TEST_TYPE-$OS diff --git a/ansible/roles/elastic.beats/.github/issue_template.md b/ansible/roles/elastic.beats/.github/issue_template.md deleted file mode 100644 index e381433b..00000000 --- a/ansible/roles/elastic.beats/.github/issue_template.md +++ /dev/null @@ -1,40 +0,0 @@ - - - - -**Describe the feature**: - - - -**Beats product**: - -**Beats version** - -**Role version**: (If using master please specify github sha) - -**OS version** (`uname -a` if on a Unix-like system): - -**Description of the problem including expected versus actual behaviour**: - -**Playbook**: -Please specify the full playbook used to reproduce this issue. 
- -**Provide logs from Ansible**: - -**Beats logs if relevant**: diff --git a/ansible/roles/elastic.beats/.github/stale.yml b/ansible/roles/elastic.beats/.github/stale.yml deleted file mode 100644 index 75614ae4..00000000 --- a/ansible/roles/elastic.beats/.github/stale.yml +++ /dev/null @@ -1,35 +0,0 @@ ---- -# Number of days of inactivity before an issue becomes stale -daysUntilStale: 90 - -# Number of days of inactivity before an stale issue is closed -daysUntilClose: 30 - -# Label to use when marking an issue as stale -staleLabel: triage/stale - -issues: - # Comment to post when marking an issue as stale. - markComment: > - This issue has been automatically marked as stale because it has not had - recent activity. It will be closed if no further activity occurs. Thank - you for your contributions. - # Comment to post when closing a stale issue. - closeComment: > - This issue has been automatically closed because it has not had recent - activity since being marked as stale. -pulls: - # Comment to post when marking a PR as stale. - markComment: > - This PR has been automatically marked as stale because it has not had - recent activity. It will be closed if no further activity occurs. Thank you - for your contributions. - - To track this PR (even if closed), please open a corresponding issue if one - does not already exist. - # Comment to post when closing a stale PR. - closeComment: > - This PR has been automatically closed because it has not had recent - activity since being marked as stale. - - Please reopen when work resumes. diff --git a/ansible/roles/elastic.beats/.gitignore b/ansible/roles/elastic.beats/.gitignore deleted file mode 100644 index fdf3143d..00000000 --- a/ansible/roles/elastic.beats/.gitignore +++ /dev/null @@ -1,9 +0,0 @@ -.kitchen/ -*.pyc -.vendor -.bundle -Converging -TODO -.idea/ -beats.iml -Dockerfile-* \ No newline at end of file diff --git a/ansible/roles/elastic.beats/.kitchen.yml b/ansible/roles/elastic.beats/.kitchen.yml deleted file mode 100644 index 23bf1083..00000000 --- a/ansible/roles/elastic.beats/.kitchen.yml +++ /dev/null @@ -1,136 +0,0 @@ ---- -driver: - name: docker - -transport: - max_ssh_sessions: 6 - -provisioner: - name: ansible_playbook - hosts: localhost - roles_path: ./ - require_ansible_repo: true - ansible_verbose: true - idempotency_test: true - -platforms: - - name: ubuntu-16.04 - driver_config: - image: ubuntu:16.04 - privileged: true - provision_command: - - apt-get update && apt-get install -y software-properties-common && add-apt-repository -y ppa:ansible/ansible - - apt-get update && apt-get -y -q install ansible python-apt python-pycurl - use_sudo: false - - name: ubuntu-18.04 - driver_config: - image: ubuntu:18.04 - privileged: true - provision_command: - - apt-get update && apt-get install -y software-properties-common && add-apt-repository -y ppa:ansible/ansible - - apt-get update && apt-get -y -q install ansible python-apt python-pycurl - - mkdir -p /run/sshd - use_sudo: false - - name: ubuntu-20.04 - driver_config: - image: ubuntu:20.04 - privileged: true - provision_command: - - apt-get update && apt-get install -y software-properties-common && add-apt-repository -y ppa:ansible/ansible - - apt-get update && apt-get -y -q install ansible python-apt python-pycurl - use_sudo: false - - name: debian-8 - driver_config: - image: debian:8 - privileged: true - provision_command: - - echo "deb http://ppa.launchpad.net/ansible/ansible/ubuntu trusty main" > /etc/apt/sources.list.d/ansible.list - - apt-key adv --keyserver 
keyserver.ubuntu.com --recv-keys 93C4A3FD7BB9C367 - - apt-get update && apt-get -y install ansible - use_sudo: false - - name: debian-9 - driver_config: - image: debian:9 - privileged: true - provision_command: - - apt-get update && apt-get -y install gnupg2 - - echo "deb http://ppa.launchpad.net/ansible/ansible/ubuntu trusty main" > /etc/apt/sources.list.d/ansible.list - - apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 93C4A3FD7BB9C367 - - apt-get update && apt-get -y install ansible - use_sudo: false - - name: debian-10 - driver_config: - image: debian:10 - privileged: true - provision_command: - - apt-get update && apt-get -y install gnupg2 - - echo "deb http://ppa.launchpad.net/ansible/ansible/ubuntu trusty main" > /etc/apt/sources.list.d/ansible.list - - apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 93C4A3FD7BB9C367 - - apt-get update && apt-get -y install ansible - use_sudo: false - - name: centos-7 - driver_config: - image: centos:7 - provision_command: - - yum -y install epel-release - - yum -y install ansible - run_command: "/usr/sbin/init" - privileged: true - use_sudo: false - - name: centos-8 - driver_config: - image: centos:8 - provision_command: - - yum -y install epel-release - - yum -y install ansible - run_command: "/usr/sbin/init" - privileged: true - use_sudo: false - - name: amazonlinux-2 - driver_config: - image: amazonlinux:2 - provision_command: - - yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm - - yum -y install ansible - volume: - - <%=ENV['ES_XPACK_LICENSE_FILE']%>:/tmp/license.json - - /etc # This fixes certain java file actions that check the mount point. Without this adding users fails for some docker storage drivers - run_command: "/usr/sbin/init" - privileged: true - use_sudo: false -suites: - - name: standard - provisioner: - playbook: test/integration/standard.yml - additional_copy_path: - - "." - run_list: - attributes: - - name: standard-6x - provisioner: - playbook: test/integration/standard-6x.yml - additional_copy_path: - - "." - run_list: - attributes: - - name: multi - provisioner: - playbook: test/integration/multi.yml - additional_copy_path: - - "." - run_list: - attributes: - - name: config - provisioner: - playbook: test/integration/config.yml - additional_copy_path: - - "." - run_list: - attributes: - - name: oss - provisioner: - playbook: test/integration/oss.yml - additional_copy_path: - - "." - run_list: - attributes: diff --git a/ansible/roles/elastic.beats/.ruby-version b/ansible/roles/elastic.beats/.ruby-version deleted file mode 100644 index 35d16fb1..00000000 --- a/ansible/roles/elastic.beats/.ruby-version +++ /dev/null @@ -1 +0,0 @@ -2.5.7 diff --git a/ansible/roles/elastic.beats/CHANGELOG.md b/ansible/roles/elastic.beats/CHANGELOG.md deleted file mode 100644 index a1ad4134..00000000 --- a/ansible/roles/elastic.beats/CHANGELOG.md +++ /dev/null @@ -1,203 +0,0 @@ -# Changelog - -## 7.12.0 - -* 7.12.0 as default version. -* 6.8.15 as 6.x tested version - - -| PR | Author | Title | -| --- | --- | --- | -| [#138](https://github.com/elastic/ansible-beats/pull/138) | [@jmlrt](https://github.com/jmlrt) | [meta] fix changelog after 7.11.2 release | - - -## 7.11.2 - -* 7.11.2 as default version. 
- -| PR | Author | Title | -| --- | --- | --- | -| [#135](https://github.com/elastic/ansible-beats/pull/135) | [@v1v](https://github.com/v1v) | Update metadata reference for CentOS 8 | -| [#134](https://github.com/elastic/ansible-beats/pull/134) | [@v1v](https://github.com/v1v) | Remove Ubuntu-14.04 support | -| [#118](https://github.com/elastic/ansible-beats/pull/118) | [@v1v](https://github.com/v1v) | Support ubuntu-20 | -| [#116](https://github.com/elastic/ansible-beats/pull/116) | [@v1v](https://github.com/v1v) | Support debian 10 | -| [#131](https://github.com/elastic/ansible-beats/pull/131) | [@jmlrt](https://github.com/jmlrt) | Copy ILM policy file with root permission | - - -## 7.11.1 - -* 7.11.1 as default version. -* 6.8.14 as 6.x tested version - -## 7.10.2 - -* 7.10.2 as default version. - - -| PR | Author | Title | -| --- | --- | --- | -| [#123](https://github.com/elastic/ansible-beats/pull/123) | [@jmlrt](https://github.com/jmlrt) | Cleanup init_script variable | - - -## 7.10.1 - -* 7.10.1 as default version. - - -| PR | Author | Title | -| --- | --- | --- | -| [#115](https://github.com/elastic/ansible-beats/pull/115) | [@v1v](https://github.com/v1v) | Support CentOS-8 | -| [#120](https://github.com/elastic/ansible-beats/pull/120) | [@jmlrt](https://github.com/jmlrt) | Remove CentOS 6 support | - - -## 7.10.0 - -* 7.10.0 as default version. - - -| PR | Author | Title | -| --- | --- | --- | -| [#113](https://github.com/elastic/ansible-beats/pull/113) | [@jmlrt](https://github.com/jmlrt) | [meta] clean deprecated bumper script | - - -## 7.9.3 - -* 7.9.3 as default version. -* 6.8.13 as 6.x tested version - -## 7.9.2 - 2020/09/24 - -* 7.9.2 as default version - -## 7.9.1 - 2020/09/03 - -* 7.9.1 as default version - -## 7.9.0 - 2020/08/18 - -* 7.9.0 as default version -* 6.8.12 as 6.x tested version - -## 7.8.1 - 2020/07/28 - -* 7.8.1 as default version -* 6.8.11 as 6.x tested version - -| PR | Author | Title | -|---------------------------------------------------------|------------------------------------|--------------------------| -| [#89](https://github.com/elastic/ansible-beats/pull/89) | [@jmlrt](https://github.com/jmlrt) | Add amazonlinux2 support | - - -## 7.8.0 - 2020/06/18 - -* 7.8.0 as default version - -## 7.7.1 - 2020/06/04 - -* 7.7.1 as default version -* 6.8.10 as 6.x tested version - -## 7.7.0 - 2020/05/13 - -* 7.7.0 as default version -* 6.8.9 as 6.x tested version -* Fix CentOS tests in [#86](https://github.com/elastic/ansible-beats/pull/86) ([@jmlrt](https://github.com/jmlrt)) - -| PR | Author | Title | -|---------------------------------------------------------|------------------------------------------|---------------------------------------------| -| [#84](https://github.com/elastic/ansible-beats/pull/84) | [@kravietz](https://github.com/kravietz) | Minor formatting fixes to pass ansible-lint | - - -## 7.6.2 - 2020/03/31 - -* 7.6.2 as default version -* 6.8.8 as 6.x tested version - -| PR | Author | Title | -|---------------------------------------------------------|------------------------------------|---------------------------------------------------------------------------| -| [#77](https://github.com/elastic/ansible-beats/pull/77) | [@jmlrt](https://github.com/jmlrt) | Add become to individual tasks | -| [#75](https://github.com/elastic/ansible-beats/pull/75) | [@ktibi](https://github.com/ktibi) | Add option to disable the repo installation and lock package installation | -| [#78](https://github.com/elastic/ansible-beats/pull/78) | 
[@astik](https://github.com/astik) | Aad task to create directory for default policies | - - -## 7.6.1 - 2020/03/04 - -* 7.6.1 as default version - - -## 7.6.0 - 2020/02/11 - -* 7.6.0 as default version - -| PR | Author | Title | -|---------------------------------------------------------|--------------------------------------------------------|------------------------------------| -| [#69](https://github.com/elastic/ansible-beats/pull/69) | [@dependabot[bot]](https://github.com/apps/dependabot) | Bump rubyzip from 1.2.2 to 2.0.0 | -| [#71](https://github.com/elastic/ansible-beats/pull/71) | [@jmlrt](https://github.com/jmlrt) | Fix filebeat example configuration | -| [#72](https://github.com/elastic/ansible-beats/pull/72) | [@beand](https://github.com/beand) | Fixed typo | - - -## 7.5.2 - 2020/01/21 - -* 7.5.2 as default version - -| PR | Author | Title | -|---------------------------------------------------------|------------------------------------|-----------------------------------------------| -| [#66](https://github.com/elastic/ansible-beats/pull/66) | [@jmlrt](https://github.com/jmlrt) | [doc] switched relative URLs to absolute URLs | -| [#67](https://github.com/elastic/ansible-beats/pull/67) | [@jmlrt](https://github.com/jmlrt) | [ci] bump ruby to 2.5.7 | - - -## 7.5.1 - 2019/12/18 - -* 7.5.1 as default version -* 6.8.6 as 6.x tested version - -| PR | Author | Title | -|---------------------------------------------------------|----------------------------------------------------|--------------------------| -| [#61](https://github.com/elastic/ansible-beats/pull/61) | [@robsonpeixoto](https://github.com/robsonpeixoto) | Allow use oss repository | - - -## 7.5.0 - 2019/12/02 - -* 7.5.0 as default version -* 6.8.5 as 6.x tested version in [#57](https://github.com/elastic/ansible-beats/pull/57) [@jmlrt](https://github.com/jmlrt) - -| PR | Author | Title | -|---------------------------------------------------------|--------------------------------------------------|-----------------------------------------------------------------| -| [#50](https://github.com/elastic/ansible-beats/pull/50) | [@jmlrt](https://github.com/jmlrt) | Add bumper script | -| [#55](https://github.com/elastic/ansible-beats/pull/55) | [@tgadiev](https://github.com/tgadiev) | Update syntax to make it compliant to modern ansible-lint rules | -| [#53](https://github.com/elastic/ansible-beats/pull/53) | [@jmlrt](https://github.com/jmlrt) | Indent yaml for config file | -| [#51](https://github.com/elastic/ansible-beats/pull/51) | [@ktibi](https://github.com/ktibi) | Rename the handlers | -| [#59](https://github.com/elastic/ansible-beats/pull/59) | [@MartinVerges](https://github.com/MartinVerges) | Beat config improvements | - - -## 7.4.1 - 2019/10/23 - -* 7.4.1 as default version -* 6.8.4 as 6.x tested version - -| PR | Author | Title | -|---------------------------------------------------------|------------------------------------|---------------------| -| [#48](https://github.com/elastic/ansible-beats/pull/48) | [@jmlrt](https://github.com/jmlrt) | Fix probot newlines | - - -## 7.4.0 - 2019/10/01 - -* 7.4.0 as default version - -| PR | Author | Title | -|---------------------------------------------------------|------------------------------------------|---------------------------------------------------------------------| -| [#25](https://github.com/elastic/ansible-beats/pull/25) | [@jmlrt](https://github.com/jmlrt) | Update kitchen Gem dependencies | -| [#6](https://github.com/elastic/ansible-beats/pull/6) | 
[@levonet](https://github.com/levonet) | Remove `beat_install` variable | -| [#32](https://github.com/elastic/ansible-beats/pull/32) | [@astik](https://github.com/astik) | Remove unused `es_conf_dir` variable | -| [#33](https://github.com/elastic/ansible-beats/pull/33) | [@astik](https://github.com/astik) | Replace custom filter with yaml handling | -| [#10](https://github.com/elastic/ansible-beats/pull/10) | [@Meecr0b](https://github.com/Meecr0b) | Move the `repo_key` configuration to a variable | -| [#34](https://github.com/elastic/ansible-beats/pull/34) | [@nyetwurk](https://github.com/nyetwurk) | Make sure the right beat service gets restarted | -| [#38](https://github.com/elastic/ansible-beats/pull/38) | [@jmlrt](https://github.com/jmlrt) | Add probot config to manage stale issues/pr + GH issue template | -| [#40](https://github.com/elastic/ansible-beats/pull/40) | [@nyetwurk](https://github.com/nyetwurk) | Make beats `repo_key` variable a unique name less likely to collide | -| [#41](https://github.com/elastic/ansible-beats/pull/41) | [@jmlrt](https://github.com/jmlrt) | Enhance ansible-beats documentation | - - -## 7.0.0 - 2019/05/09 - -* First release -* 7.0.0 as default version diff --git a/ansible/roles/elastic.beats/Gemfile b/ansible/roles/elastic.beats/Gemfile deleted file mode 100644 index bf42881a..00000000 --- a/ansible/roles/elastic.beats/Gemfile +++ /dev/null @@ -1,6 +0,0 @@ -source 'https://rubygems.org' - -gem 'test-kitchen' -gem 'kitchen-docker' -gem 'kitchen-ansible' -gem 'net-ssh' diff --git a/ansible/roles/elastic.beats/Gemfile.lock b/ansible/roles/elastic.beats/Gemfile.lock deleted file mode 100644 index 0796e484..00000000 --- a/ansible/roles/elastic.beats/Gemfile.lock +++ /dev/null @@ -1,117 +0,0 @@ -GEM - remote: https://rubygems.org/ - specs: - bcrypt_pbkdf (1.0.1) - builder (3.2.4) - ed25519 (1.2.4) - equatable (0.5.0) - erubi (1.9.0) - ffi (1.12.1) - gssapi (1.3.0) - ffi (>= 1.0.1) - gyoku (1.3.1) - builder (>= 2.1.2) - httpclient (2.8.3) - kitchen-ansible (0.50.0) - net-ssh (>= 3) - test-kitchen (>= 1.4) - kitchen-docker (2.9.0) - test-kitchen (>= 1.0.0) - license-acceptance (1.0.11) - pastel (~> 0.7) - tomlrb (~> 1.2) - tty-box (~> 0.3) - tty-prompt (~> 0.18) - little-plugger (1.1.4) - logging (2.2.2) - little-plugger (~> 1.1) - multi_json (~> 1.10) - mixlib-install (3.11.18) - mixlib-shellout - mixlib-versioning - thor - mixlib-shellout (2.4.4) - mixlib-versioning (1.2.7) - multi_json (1.14.1) - necromancer (0.4.0) - net-scp (2.0.0) - net-ssh (>= 2.6.5, < 6.0.0) - net-ssh (5.2.0) - net-ssh-gateway (2.0.0) - net-ssh (>= 4.0.0) - nori (2.6.0) - pastel (0.7.2) - equatable (~> 0.5.0) - tty-color (~> 0.4.0) - rubyntlm (0.6.2) - rubyzip (2.0.0) - strings (0.1.5) - strings-ansi (~> 0.1) - unicode-display_width (~> 1.5) - unicode_utils (~> 1.4) - strings-ansi (0.1.0) - test-kitchen (2.2.5) - bcrypt_pbkdf (~> 1.0) - ed25519 (~> 1.2) - license-acceptance (~> 1.0, >= 1.0.11) - mixlib-install (~> 3.6) - mixlib-shellout (>= 1.2, < 3.0) - net-scp (>= 1.1, < 3.0) - net-ssh (>= 2.9, < 6.0) - net-ssh-gateway (>= 1.2, < 3.0) - thor (~> 0.19) - winrm (~> 2.0) - winrm-elevated (~> 1.0) - winrm-fs (~> 1.1) - thor (0.20.3) - timers (4.3.0) - tomlrb (1.2.8) - tty-box (0.3.0) - pastel (~> 0.7.2) - strings (~> 0.1.4) - tty-cursor (~> 0.6.0) - tty-color (0.4.3) - tty-cursor (0.6.1) - tty-prompt (0.18.1) - necromancer (~> 0.4.0) - pastel (~> 0.7.0) - timers (~> 4.0) - tty-cursor (~> 0.6.0) - tty-reader (~> 0.5.0) - tty-reader (0.5.0) - tty-cursor (~> 0.6.0) - tty-screen (~> 0.6.4) - 
wisper (~> 2.0.0) - tty-screen (0.6.5) - unicode-display_width (1.6.0) - unicode_utils (1.4.0) - winrm (2.3.4) - builder (>= 2.1.2) - erubi (~> 1.8) - gssapi (~> 1.2) - gyoku (~> 1.0) - httpclient (~> 2.2, >= 2.2.0.2) - logging (>= 1.6.1, < 3.0) - nori (~> 2.0) - rubyntlm (~> 0.6.0, >= 0.6.1) - winrm-elevated (1.1.1) - winrm (~> 2.0) - winrm-fs (~> 1.0) - winrm-fs (1.3.4) - erubi (~> 1.8) - logging (>= 1.6.1, < 3.0) - rubyzip (~> 2.0) - winrm (~> 2.0) - wisper (2.0.0) - -PLATFORMS - ruby - -DEPENDENCIES - kitchen-ansible - kitchen-docker - net-ssh - test-kitchen - -BUNDLED WITH - 1.17.0 diff --git a/ansible/roles/elastic.beats/LICENSE b/ansible/roles/elastic.beats/LICENSE deleted file mode 100644 index 0455cacd..00000000 --- a/ansible/roles/elastic.beats/LICENSE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright (c) 2012-2016 Elasticsearch - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/ansible/roles/elastic.beats/Makefile b/ansible/roles/elastic.beats/Makefile deleted file mode 100644 index 35febc4b..00000000 --- a/ansible/roles/elastic.beats/Makefile +++ /dev/null @@ -1,31 +0,0 @@ -default: build - -SHELL:=/bin/bash -eux -PATTERN := standard-ubuntu-1804 - -.PHONY: converge verify test login destroy list - -setup: - bundle install - docker ps - -converge: - bundle exec kitchen converge $(PATTERN) - -verify: - bundle exec kitchen verify $(PATTERN) - -test: - bundle exec kitchen test $(PATTERN) --destroy=always - -login: - bundle exec kitchen login $(PATTERN) - -destroy: - bundle exec kitchen destroy $(PATTERN) - -destroy-all: - bundle exec kitchen destroy - -list: - bundle exec kitchen list diff --git a/ansible/roles/elastic.beats/README.md b/ansible/roles/elastic.beats/README.md deleted file mode 100644 index b42fbf83..00000000 --- a/ansible/roles/elastic.beats/README.md +++ /dev/null @@ -1,218 +0,0 @@ -# ansible-beats -[![Build Status](https://img.shields.io/jenkins/s/https/devops-ci.elastic.co/job/elastic+ansible-beats+master.svg)](https://devops-ci.elastic.co/job/elastic+ansible-beats+master/) -[![Ansible Galaxy](https://img.shields.io/badge/ansible--galaxy-elastic.beats-blue.svg)](https://galaxy.ansible.com/elastic/beats/) - -This role provides a generic means of installing Elastic supported Beats - -**Tested Beats** - -* Filebeat -* MetricBeat (TopBeat in 1.x) -* Packetbeat - -**Tested Versions** - -* 7.x -* 6.x - -**Tested Platforms** - -* Ubuntu 16.04 -* Ubuntu 18.04 -* Ubuntu 20.04 -* Debian 8 -* Debian 9 -* Debian 10 -* CentOS 7 -* CentOS 8 -* Amazon Linux 2 - -## Usage - -Create your Ansible playbook with your own tasks, and include the role beats. You will have to have this repository accessible within the context of playbook. - -```sh -ansible-galaxy install elastic.beats,v7.12.0 -``` - -Then create your playbook yaml adding the role beats. -The application of the beats role results in the installation of a node on a host. 
- -The simplest configuration therefore consists of: - -```yaml - hosts: localhost - roles: - - role: elastic.beats - vars: - beats_version: 7.12.0 - beat: filebeat - beat_conf: - filebeat: - inputs: - - type: log - enabled: true - paths: - - /var/log/*.log -``` - -The above installs Filebeat 7.12.0 on the hosts 'localhost'. - -**Notes**: -- Beats default version is described in [`beats_version`](https://github.com/elastic/ansible-beats/blob/master/defaults/main.yml#L4). You can override this variable in your playbook to install another version. -While we are testing this role only with one 7.x and one 6.x version (respectively [7.12.0](https://github.com/elastic/ansible-beats/blob/master/defaults/main.yml#L4) and [6.8.15](https://github.com/elastic/ansible-beats/blob/master/test/integration/standard-6x.yml#L7) at the time of writing), this role should work with others version also in most cases. -- Beat product is described in `beat` variable. While currently tested Beats are Filebeat, Metricbeat & Packetbeat, this role should work also with other member of [The Beats Family](https://www.elastic.co/products/beats) in most cases. - -## Testing - -This playbook uses [Kitchen](https://kitchen.ci/) for CI and local testing. - -### Requirements - -* Ruby -* Bundler -* Docker -* Make - -### Running the tests - -To converge an Ubuntu 18.04 host -```sh -$ make converge -``` - -To run the tests -```sh -$ make verify -``` - -To list all of the different test suits -```sh -$ make list -``` - -The default test suite is Ubuntu 18.04. If you want to test another suite you can override this with the `PATTERN` variable -```sh -$ make converge PATTERN=standard-centos-7 -``` - -The `PATTERN` is a kitchen pattern which can match multiple suites. To run all tests for CentOS -```sh -$ make converge PATTERN=centos-7 -``` - -When you are finished testing you can clean up everything with -```sh -$ make destroy-all -``` - -### Basic Beats configuration - -All Beats configuration parameters are supported. This is achieved using a configuration map parameter `beat_conf` which is serialized into the `${beat}.yml` file. -The use of a map ensures the Ansible playbook does not need to be updated to reflect new/deprecated/plugin configuration parameters. - -In addition to the `beat_conf` map, several other parameters are supported for additional functions e.g. script installation. These can be found in the role's `defaults/main.yml` file. - -The following illustrates applying configuration parameters to Packetbeat instance. - -```yaml -- name: Example playbook for installing packetbeat - hosts: localhost - roles: - - { role: beats, beat: "packetbeat", - beat_conf: { - "interfaces": {"device":"any"}, - "protocols": { - "dns": { - "ports": [53], - "include_authorities":true - }, - "http": { - "ports": [80, 8080, 8000, 5000, 8002] - }, - "memcache": { - "ports": [11211] - }, - "mysql": { - "ports": [3306] - }, - "pgsql": { - "ports": [5432] - }, - "redis": { - "ports": [6379] - }, - "thrift": { - "ports": [9090] - }, - "mongodb": { - "ports": [27017] - } - } - }, - output_conf : { - "elasticsearch": { - "hosts": ["localhost:9200"] - } - } - } - vars: - use_repository: "true" -``` - -### Additional Configuration - -Supported variables are as follows: - -- **beat** (*MANDATORY*): Beat product. Supported values are: "filebeat", "metricbeat" & "packetbeat" (others beats from [The Beats Family](https://www.elastic.co/products/beats) should work in most cases but aren't currently tested). 
-- **beat_conf** (*MANDATORY*): Beat Configuration. Should be defined as a map. -- **beats_version** (*Defaults to `7.12.0`*): Beats version. -- **version_lock** (*Defaults to `false`*): Locks the installed version if set to true, thus preventing other processes from updating. This will not impact the roles ability to update the beat on subsequent runs (it unlocks and re-locks if required). -- **use_repository** (*Defaults to `true`*): Use elastic repo for yum or apt if true. If false, a custom custom_package_url must be provided. -- **beats_add_repository** (*Defaults to `{use_repository}`*): Install elastic repo for yum or apt if true. If false, the present repositories will be used. Useful if you already have beats packages in your repo. -- **start_service** (*Defaults to `true`*): service will be started if true, false otherwise. -- **restart_on_change** (*Defaults to `true`*): Changes to configuration or installed versions, will result in a restart if true. -- **daemon_args** (*Applicable to version 1.x of beats*): Allows run time params to be passed to beats. -- **logging_conf** (*Defaults to `{"files":{"rotateeverybytes":10485760}}`*): Logging configuration. Should be defined as a map. Map is serialized into logging section of beat config. -- **shipper_conf** (*Applicable to version 1.x of beats*): Shipper configuration. Should be defined as a map . Map is serialized into shipper section of beat config. -- **output_conf** (*Defaults to `{"elasticsearch":{"hosts":["localhost:9200"]}}`*): Output configuration. Map is serialized into output section of beat config. -- **beats_pid_dir** (*Defaults to `/var/run`*): Location of beats pid file. -- **beats_conf_dir** (*Defaults to `/etc/{beat}`*): Location of conf directory for beats configuration file. -- **default_ilm_policy** (*Defaults undefined*): local path to default policy if any custom one is defined - -### Focus on ILM - -By default, *beat* will create a default policy defined as part of the beat being deployed. -You can override default ILM setup by defining ILM conf as part of *beat_conf*. -For example: - -``` -- role: ansible-beats - beat: metricbeat - beat_conf: - setup: - ilm: - policy_file: /etc/filebeat/policies/my-default-metricbeat.json - overwrite: true - metricbeat.modules: - ... - default_ilm_policy: conf/my-default-metricbeat.json - become: yes -``` - -This will copy *conf/my-default-filebeat.json* to */etc/filebeat/policies/my-default-filebeat.json*. -This policy will be used as default one for this beat. - -## License - -Apache 2.0 - -## Limitations - -Multiple instances of the same beat cannot be installed on the same target server. - -## Questions on Usage - -We welcome questions on how to use the role. However, in order to keep the GitHub issues list focused on "issues" we ask the community to raise questions at https://discuss.elastic.co/c/beats. This is monitored by the maintainers. - -Community Contributions always appreciated and welcome! Please ensure all contributions include tests as appropriate. 
diff --git a/ansible/roles/elastic.beats/ansible.cfg b/ansible/roles/elastic.beats/ansible.cfg deleted file mode 100644 index d9a8c501..00000000 --- a/ansible/roles/elastic.beats/ansible.cfg +++ /dev/null @@ -1 +0,0 @@ -[defaults] \ No newline at end of file diff --git a/ansible/roles/elastic.beats/defaults/main.yml b/ansible/roles/elastic.beats/defaults/main.yml deleted file mode 100644 index 3620eeed..00000000 --- a/ansible/roles/elastic.beats/defaults/main.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -# defaults file for beats -beats_version: 7.12.0 -oss_version: false -version_lock: false -use_repository: true -beats_add_repository: "{{ use_repository }}" -start_service: true -restart_on_change: true -daemon_args: "" -logging_conf: {"files":{"rotateeverybytes":10485760}} -output_conf: {"elasticsearch":{"hosts":["localhost:9200"]}} -beats_pid_dir: "/var/run" -beats_conf_dir: "/etc/{{beat}}" diff --git a/ansible/roles/elastic.beats/handlers/main.yml b/ansible/roles/elastic.beats/handlers/main.yml deleted file mode 100644 index 8fe8af38..00000000 --- a/ansible/roles/elastic.beats/handlers/main.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -# handlers file for beats - -- name: restart the service - become: yes - service: - name: "{{ beat_product }}" - state: restarted - enabled: true - when: start_service and restart_on_change and not beats_started.changed diff --git a/ansible/roles/elastic.beats/meta/.galaxy_install_info b/ansible/roles/elastic.beats/meta/.galaxy_install_info deleted file mode 100644 index 5bd452bd..00000000 --- a/ansible/roles/elastic.beats/meta/.galaxy_install_info +++ /dev/null @@ -1,2 +0,0 @@ -install_date: Sat Apr 10 14:18:16 2021 -version: v7.12.0 diff --git a/ansible/roles/elastic.beats/meta/main.yml b/ansible/roles/elastic.beats/meta/main.yml deleted file mode 100644 index db6259a9..00000000 --- a/ansible/roles/elastic.beats/meta/main.yml +++ /dev/null @@ -1,28 +0,0 @@ ---- -allow_duplicates: true -dependencies: [] -galaxy_info: - role_name: beats - author: Dale McDiarmid - description: Beats for Linux - company: "Elastic.co" - issue_tracker_url: https://github.com/elastic/ansible-beats/issues - license: "license (Apache)" - min_ansible_version: 2.0 - platforms: - - name: EL - versions: - - 7 - - 8 - - name: Debian - versions: - - all - - name: Ubuntu - versions: - - all - galaxy_tags: - - beats - - elastic - - elk - - logging - - monitoring diff --git a/ansible/roles/elastic.beats/tasks/beats-config.yml b/ansible/roles/elastic.beats/tasks/beats-config.yml deleted file mode 100644 index a2ce7bae..00000000 --- a/ansible/roles/elastic.beats/tasks/beats-config.yml +++ /dev/null @@ -1,87 +0,0 @@ ---- -# Configure Beats Node - -- name: Set default facts - set_fact: - pid_file: '{{ beats_pid_dir }}/{{ beat }}.pid' - instance_default_file: '{{ default_file }}/{{ beat }}' - conf_file: '{{ beats_conf_dir }}/{{ beat }}.yml' - beat_output_conf: - output: '{{ output_conf }}' - -- name: Set beat_shipper_conf - set_fact: - beat_shipper_conf: - shipper: '{{ shipper_conf }}' - when: shipper_conf is defined - -- name: Set beat_logging_conf - set_fact: - beat_logging_conf: - logging: '{{ logging_conf }}' - -- name: Check pid_dir status - stat: - path: '{{ beats_pid_dir }}' - register: pid_stat - -- name: Create PID Directory - become: yes - file: - path: '{{ beats_pid_dir }}' - state: directory - when: pid_stat.stat.isdir is not defined or pid_stat.stat.islnk is not defined - -# fail if pid and config directories are not links or not directories i.e files - -- name: Create Config Directory - 
become: yes - file: - path: '{{ beats_conf_dir }}' - state: directory - -# Copy the default file -- name: Copy Default File for Instance - become: yes - template: - src: beat.j2 - dest: '{{ instance_default_file }}' - mode: 0644 - force: true - owner: root - group: root - notify: restart the service - -# Copy templated config file -- name: Copy Configuration File for {{ beat }} - become: yes - template: - src: beat.yml.j2 - dest: '{{ conf_file }}' - mode: 0644 - force: true - owner: root - group: root - notify: restart the service - -# Copy default ILM policy file -- name: Create default policies config directory - become: yes - file: - path: '{{ beat_conf.setup.ilm.policy_file | dirname }}' - state: directory - mode: 0755 - owner: root - group: root - when: default_ilm_policy is defined - -- name: Copy default ILM policy file for {{ beat }} - become: yes - copy: - src: '{{default_ilm_policy}}' - dest: '{{ beat_conf.setup.ilm.policy_file }}' - mode: 0644 - owner: root - group: root - when: default_ilm_policy is defined - notify: restart the service diff --git a/ansible/roles/elastic.beats/tasks/beats-debian.yml b/ansible/roles/elastic.beats/tasks/beats-debian.yml deleted file mode 100644 index f86268c2..00000000 --- a/ansible/roles/elastic.beats/tasks/beats-debian.yml +++ /dev/null @@ -1,100 +0,0 @@ ---- - -- name: Debian - Ensure apt-transport-https is installed - become: yes - apt: - name: apt-transport-https - state: present - cache_valid_time: 86400 - when: use_repository | bool - register: beat_install - until: beat_install is succeeded - notify: restart the service - -- name: Debian - Ensure python-urllib3, python-openssl, python-pyasn1 & python-pip are installed - become: yes - apt: - name: - - python-urllib3 - - python-openssl - - python-pyasn1 - - python-pip - state: present - register: libs_install - until: libs_install is succeeded - when: - - use_repository | bool - - ansible_distribution_release == "trusty" - -- name: Debian - ensure ndg-httpsclient pip is installed - become: yes - pip: - name: ndg-httpsclient - state: present - register: ndg_install - until: ndg_install is succeeded - when: - - use_repository | bool - - ansible_distribution_release == "trusty" - -- name: Debian - Add Beats repository key - become: yes - apt_key: - url: '{{ elastic_repo_key }}' - state: present - register: apt_key_install - until: apt_key_install is succeeded - when: beats_add_repository | bool - -- name: Debian - add beats repository - become: yes - apt_repository: - repo: 'deb {{ repo_url }} stable main' - state: present - register: repo_install - until: repo_install is succeeded - when: beats_add_repository | bool - -- name: Debian - unhold {{ beat }} version for install - become: yes - command: apt-mark unhold {{ beat }} - changed_when: false - -- name: Debian - Ensure {{ beat }} is installed - become: yes - apt: - name: >- - {{ beat }}{% if beats_version is defined and beats_version|length>0 %}={{ beats_version }}{% endif %} - state: present - cache_valid_time: 86400 - register: beat_install - until: beat_install is succeeded - when: use_repository | bool - notify: restart the service - -- name: Debian - hold {{ beat }} version - become: yes - command: apt-mark hold {{ beat }} - when: version_lock - changed_when: false - -- name: Set os_arch - set_fact: - os_arch: >- - {{ ansible_architecture == 'x86_64' | ternary('amd64', 'i386') }} - -- name: Debian - Download {{ beat }} from url - get_url: - url: >- - {% if custom_package_url is defined %}{{ custom_package_url }}{% - else %}{{ 
beats_package_url }}/{{ beat }}/{{ beat }}_{{ beats_version }}_{{ os_arch }}.deb{% endif %} - dest: '/tmp/{{ beat }}_{{ beats_version }}_{{ os_arch }}.deb' - validate_certs: false - when: not use_repository | bool - -- name: Debian - Ensure {{ beat }} is installed from downloaded package - become: yes - apt: - deb: '/tmp/{{ beat }}_{{ beats_version }}_{{ os_arch }}.deb' - when: not use_repository | bool - notify: restart the service diff --git a/ansible/roles/elastic.beats/tasks/beats-param-check.yml b/ansible/roles/elastic.beats/tasks/beats-param-check.yml deleted file mode 100644 index 356bad05..00000000 --- a/ansible/roles/elastic.beats/tasks/beats-param-check.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- -- name: Check beat variable - fail: - msg: "beat must be specified and cannot be blank e.g. filebeat" - when: beat is not defined or (beat | length == 0) - -- name: Check beat_conf variable - fail: - msg: "beat_conf must be specified" - when: beat_conf is not defined - -- name: Check ILM variables - fail: - msg: "beat_conf.setup.ilm.policy_file must be specified if default_ilm_policy is used" - when: default_ilm_policy is defined and beat_conf.setup.ilm.policy_file is not defined - -- name: Set beats_major_version - set_fact: - beats_major_version: '{% if oss_version %}oss-{% endif %}{{ beats_version.split(".")[0] }}.x' diff --git a/ansible/roles/elastic.beats/tasks/beats-redhat.yml b/ansible/roles/elastic.beats/tasks/beats-redhat.yml deleted file mode 100644 index 2e292d6f..00000000 --- a/ansible/roles/elastic.beats/tasks/beats-redhat.yml +++ /dev/null @@ -1,59 +0,0 @@ ---- -- name: Redhat - add beats repository - become: yes - template: - src: beats.repo.j2 - dest: /etc/yum.repos.d/beats.repo - when: beats_add_repository | bool - -- name: RedHat - install yum-version-lock - become: yes - yum: - name: yum-plugin-versionlock - state: present - update_cache: true - when: version_lock | bool - register: versionlock_install - until: versionlock_install is succeeded - -- name: RedHat - unlock {{ beat }} for install - become: yes - shell: yum versionlock delete {{ beat }} || true - changed_when: false - when: version_lock | bool - tags: - - skip_ansible_lint - -- name: RedHat - Ensure {{ beat }} is installed - become: yes - yum: - name: >- - {{ beat }}{% if beats_version is defined and beats_version|length %}-{{ beats_version }}{% endif %} - state: present - update_cache: true - register: beat_install - until: beat_install is succeeded - when: use_repository | bool - notify: restart the service - -- name: RedHat - lock {{ beat }} version - become: yes - shell: >- - yum versionlock add - {{ beat }}{% if beats_version is defined and beats_version|length %}-{{ beats_version }}{% endif %} - when: version_lock | bool - changed_when: false - tags: - - skip_ansible_lint - -- name: RedHat - Install {{ beat }} from url - become: yes - yum: - name: >- - {% if custom_package_url is defined %}{{ custom_package_url }}{% - else %}{{ beats_package_url }}/{{ beat }}-{{ beats_version }}-{{ ansible_architecture }}.rpm{% endif %} - state: present - register: beat_install - until: beat_install is succeeded - when: not use_repository - notify: restart the service diff --git a/ansible/roles/elastic.beats/tasks/beats.yml b/ansible/roles/elastic.beats/tasks/beats.yml deleted file mode 100644 index 384df822..00000000 --- a/ansible/roles/elastic.beats/tasks/beats.yml +++ /dev/null @@ -1,24 +0,0 @@ ---- -# Install OS specific beats - -- name: Include specific Beats - include_tasks: beats-debian.yml - when: 
ansible_os_family == 'Debian' - -- name: Include specific Beats - include_tasks: beats-redhat.yml - when: ansible_os_family == 'RedHat' - -# Configuration file for beats -- name: Beats configuration - include_tasks: beats-config.yml - -# Make sure the service is started, and restart if necessary -- name: Start {{ beat_product }} service - become: yes - service: - name: '{{ beat }}' - state: started - enabled: true - when: start_service - register: beats_started diff --git a/ansible/roles/elastic.beats/tasks/main.yml b/ansible/roles/elastic.beats/tasks/main.yml deleted file mode 100644 index 71bc49ee..00000000 --- a/ansible/roles/elastic.beats/tasks/main.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- -# tasks file for beats - -- name: check-parameters - include_tasks: beats-param-check.yml - -- name: define beat product - set_fact: - beat_product: "{{ beat }}" - -- name: os-specific vars - include_vars: '{{ ansible_os_family }}.yml' - -- include_tasks: beats.yml - -- name: Force all notified handlers to run at this point, not waiting for normal sync points - meta: flush_handlers diff --git a/ansible/roles/elastic.beats/templates/beat.j2 b/ansible/roles/elastic.beats/templates/beat.j2 deleted file mode 100644 index 0321e8e3..00000000 --- a/ansible/roles/elastic.beats/templates/beat.j2 +++ /dev/null @@ -1,7 +0,0 @@ -################################ -# {{beat}} -################################ - -# Beats PID File -PIDFILE={{pid_file}} -DAEMON_ARGS="-c {{conf_file}} {{daemon_args}}" diff --git a/ansible/roles/elastic.beats/templates/beat.yml.j2 b/ansible/roles/elastic.beats/templates/beat.yml.j2 deleted file mode 100644 index 549b50f6..00000000 --- a/ansible/roles/elastic.beats/templates/beat.yml.j2 +++ /dev/null @@ -1,22 +0,0 @@ -# {{ ansible_managed }} - -################### {{beat}} Configuration ######################### - -############################# {{beat}} ###################################### -{{ beat_conf | to_nice_yaml(indent=2) }} - -############################################################################### -############################# Libbeat Config ################################## -# Base config file used by all other beats for using libbeat features - -############################# Output ########################################## - -{{ beat_output_conf | to_nice_yaml(indent=2) }} - -{% if shipper_conf is defined %}############################# Shipper ######################################### - -{{ beat_shipper_conf | to_nice_yaml(indent=2) }} -{% endif %} -############################# Logging ######################################### - -{{ beat_logging_conf | to_nice_yaml(indent=2) }} diff --git a/ansible/roles/elastic.beats/templates/beats.repo.j2 b/ansible/roles/elastic.beats/templates/beats.repo.j2 deleted file mode 100644 index 0cb2575a..00000000 --- a/ansible/roles/elastic.beats/templates/beats.repo.j2 +++ /dev/null @@ -1,6 +0,0 @@ -[beats] -name=Elastic Beats Repository -baseurl={{ repo_url }} -enabled=1 -gpgkey={{ elastic_repo_key }} -gpgcheck=1 diff --git a/ansible/roles/elastic.beats/test/integration/config.yml b/ansible/roles/elastic.beats/test/integration/config.yml deleted file mode 100644 index 8b153aaf..00000000 --- a/ansible/roles/elastic.beats/test/integration/config.yml +++ /dev/null @@ -1,46 +0,0 @@ ---- -# Install specific version here -- name: wrapper playbook for kitchen testing beats - hosts: localhost - roles: - - role: ansible-beats - beat: packetbeat - version_lock: true - beat_conf: - interfaces: - device: any - protocols: - dns: - ports: - - 53 
- include_authorities: true - http: - ports: - - 80 - - 8080 - - 8000 - - 5000 - - 8002 - memcache: - ports: - - 11211 - mysql: - ports: - - 3306 - pgsql: - ports: - - 5432 - redis: - ports: - - 6379 - thrift: - ports: - - 9090 - mongodb: - ports: - - 27017 - output_conf: - elasticsearch: - hosts: ["localhost:9200"] - vars: - use_repository: true diff --git a/ansible/roles/elastic.beats/test/integration/config/serverspec/default_spec.rb b/ansible/roles/elastic.beats/test/integration/config/serverspec/default_spec.rb deleted file mode 100644 index 28477681..00000000 --- a/ansible/roles/elastic.beats/test/integration/config/serverspec/default_spec.rb +++ /dev/null @@ -1,49 +0,0 @@ -require 'spec_helper' - -describe 'Config Tests' do - - describe service('packetbeat') do - it { should be_running } - end - - describe package('packetbeat') do - it { should be_installed } - end - - describe file('/etc/packetbeat/packetbeat.yml') do - it { should be_file } - it { should be_owned_by 'root' } - end - - describe file('/etc/packetbeat/packetbeat.yml') do - it { should contain 'logging:' } - it { should contain 'output:' } - it { should contain 'protocols:' } - it { should contain 'dns:' } - it { should contain 'memcache:' } - it { should contain 'http:' } - it { should contain 'mongodb:' } - it { should contain 'mysql:' } - it { should contain 'pgsql:' } - it { should contain 'redis:' } - it { should contain 'thrift:' } - it { should contain 'interfaces:' } - it { should contain 'device: any' } - end - - describe file('/etc/init.d/packetbeat') do - it { should exist } - end - - if os[:family] == 'redhat' - describe command('yum versionlock list | grep packetbeat') do - its(:stdout) { should match /packetbeat/ } - end - elsif ['debian', 'ubuntu'].include?(os[:family]) - describe command('sudo apt-mark showhold | grep packetbeat') do - its(:stdout) { should match /packetbeat/ } - end - end - -end - diff --git a/ansible/roles/elastic.beats/test/integration/helpers/serverspec/Gemfile b/ansible/roles/elastic.beats/test/integration/helpers/serverspec/Gemfile deleted file mode 100644 index e6aafc0b..00000000 --- a/ansible/roles/elastic.beats/test/integration/helpers/serverspec/Gemfile +++ /dev/null @@ -1,3 +0,0 @@ -source 'https://rubygems.org' - -gem 'rspec-retry' diff --git a/ansible/roles/elastic.beats/test/integration/helpers/serverspec/spec_helper.rb b/ansible/roles/elastic.beats/test/integration/helpers/serverspec/spec_helper.rb deleted file mode 100644 index 1a1bfb52..00000000 --- a/ansible/roles/elastic.beats/test/integration/helpers/serverspec/spec_helper.rb +++ /dev/null @@ -1,11 +0,0 @@ -require 'serverspec' -set :backend, :exec - -require 'rspec/retry' - -RSpec.configure do |config| - # show retry status in spec process - config.verbose_retry = true - # show exception that triggers a retry if verbose_retry is set to true - config.display_try_failure_messages = true -end \ No newline at end of file diff --git a/ansible/roles/elastic.beats/test/integration/multi.yml b/ansible/roles/elastic.beats/test/integration/multi.yml deleted file mode 100644 index 78b2eb53..00000000 --- a/ansible/roles/elastic.beats/test/integration/multi.yml +++ /dev/null @@ -1,29 +0,0 @@ ---- -- name: wrapper playbook for kitchen testing "beats" - hosts: localhost - roles: - - role: ansible-beats - beat: filebeat - beat_conf: - filebeat: - inputs: - - paths: - - /var/log/*.log - type: log - - role: ansible-beats - beat: metricbeat - beat_conf: - metricbeat: - modules: - - module: "system" - metricsets: - - cpu - - 
filesystem - - network - - process - enabled: true - period: 10s - processes: [".*"] - cpu_ticks: false - vars: - use_repository: true diff --git a/ansible/roles/elastic.beats/test/integration/multi/serverspec/default_spec.rb b/ansible/roles/elastic.beats/test/integration/multi/serverspec/default_spec.rb deleted file mode 100644 index 086b6f28..00000000 --- a/ansible/roles/elastic.beats/test/integration/multi/serverspec/default_spec.rb +++ /dev/null @@ -1,57 +0,0 @@ -require 'spec_helper' - -describe 'Multi Tests' do - - describe service('filebeat') do - it { should be_running } - end - - describe package('filebeat') do - it { should be_installed } - end - - describe file('/etc/filebeat/filebeat.yml') do - it { should be_file } - it { should be_owned_by 'root' } - end - - describe file('/etc/filebeat/filebeat.yml') do - it { should contain 'filebeat:' } - it { should contain 'logging:' } - it { should contain 'output:' } - end - - describe file('/etc/init.d/filebeat') do - it { should exist } - end - - describe service('metricbeat') do - it { should be_running } - end - - describe package('metricbeat') do - it { should be_installed } - end - - describe file('/etc/metricbeat/metricbeat.yml') do - it { should be_file } - it { should be_owned_by 'root' } - end - - describe file('/etc/metricbeat/metricbeat.yml') do - it { should contain 'module: system' } - it { should contain 'metricsets:' } - it { should contain 'period: 10s' } - it { should contain 'processes:' } - it { should contain 'cpu_ticks:' } - it { should contain 'logging:' } - it { should contain 'output:' } - end - - describe file('/etc/init.d/metricbeat') do - it { should exist } - end - - -end - diff --git a/ansible/roles/elastic.beats/test/integration/oss.yml b/ansible/roles/elastic.beats/test/integration/oss.yml deleted file mode 100644 index 83f57748..00000000 --- a/ansible/roles/elastic.beats/test/integration/oss.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- name: wrapper playbook for kitchen testing "beats" - hosts: localhost - roles: - - role: ansible-beats - beat: filebeat - beat_conf: - filebeat: - inputs: - - paths: - - /var/log/*.log - type: log - vars: - use_repository: true - oss_version: true diff --git a/ansible/roles/elastic.beats/test/integration/oss/serverspec/default_spec.rb b/ansible/roles/elastic.beats/test/integration/oss/serverspec/default_spec.rb deleted file mode 100644 index 07f0abf7..00000000 --- a/ansible/roles/elastic.beats/test/integration/oss/serverspec/default_spec.rb +++ /dev/null @@ -1,39 +0,0 @@ -require 'spec_helper' - -describe 'Open Source Tests' do - - describe service('filebeat') do - it { should be_running } - end - - describe package('filebeat') do - it { should be_installed } - end - - describe file('/etc/filebeat/filebeat.yml') do - it { should be_file } - it { should be_owned_by 'root' } - end - - describe file('/etc/filebeat/filebeat.yml') do - it { should contain 'filebeat:' } - it { should contain 'logging:' } - it { should contain 'output:' } - end - - describe file('/etc/init.d/filebeat') do - it { should exist } - end - - if os[:family] == 'redhat' - describe command('yum versionlock list | grep filebeat') do - its(:stdout) { should_not match /filebeat/ } - end - elsif ['debian', 'ubuntu'].include?(os[:family]) - describe command('sudo apt-mark showhold | grep filebeat') do - its(:stdout) { should_not match /filebeat/ } - end - end - -end - diff --git a/ansible/roles/elastic.beats/test/integration/standard-6x.yml b/ansible/roles/elastic.beats/test/integration/standard-6x.yml deleted 
file mode 100644 index 912742b7..00000000 --- a/ansible/roles/elastic.beats/test/integration/standard-6x.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -- name: wrapper playbook for kitchen testing "beats" - hosts: localhost - roles: - - role: ansible-beats - beat: filebeat - beat_conf: - filebeat: - prospectors: - - paths: - - /var/log/*.log - input_type: log - registry_file: /var/lib/filebeat/registry - vars: - beats_version: 6.8.15 - use_repository: "true" diff --git a/ansible/roles/elastic.beats/test/integration/standard-6x/serverspec/default_spec.rb b/ansible/roles/elastic.beats/test/integration/standard-6x/serverspec/default_spec.rb deleted file mode 100644 index 2aa16923..00000000 --- a/ansible/roles/elastic.beats/test/integration/standard-6x/serverspec/default_spec.rb +++ /dev/null @@ -1,39 +0,0 @@ -require 'spec_helper' - -describe 'Standard Tests' do - - describe service('filebeat') do - it { should be_running } - end - - describe package('filebeat') do - it { should be_installed } - end - - describe file('/etc/filebeat/filebeat.yml') do - it { should be_file } - it { should be_owned_by 'root' } - end - - describe file('/etc/filebeat/filebeat.yml') do - it { should contain 'filebeat:' } - it { should contain 'logging:' } - it { should contain 'output:' } - end - - describe file('/etc/init.d/filebeat') do - it { should exist } - end - - if os[:family] == 'redhat' - describe command('yum versionlock list | grep filebeat') do - its(:stdout) { should_not match /filebeat/ } - end - elsif ['debian', 'ubuntu'].include?(os[:family]) - describe command('sudo apt-mark showhold | grep filebeat') do - its(:stdout) { should_not match /filebeat/ } - end - end - -end - diff --git a/ansible/roles/elastic.beats/test/integration/standard.yml b/ansible/roles/elastic.beats/test/integration/standard.yml deleted file mode 100644 index eee8be1a..00000000 --- a/ansible/roles/elastic.beats/test/integration/standard.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -- name: wrapper playbook for kitchen testing "beats" - hosts: localhost - roles: - - role: ansible-beats - beat: filebeat - beat_conf: - filebeat: - inputs: - - paths: - - /var/log/*.log - type: log - vars: - use_repository: true diff --git a/ansible/roles/elastic.beats/test/integration/standard/serverspec/default_spec.rb b/ansible/roles/elastic.beats/test/integration/standard/serverspec/default_spec.rb deleted file mode 100644 index 2aa16923..00000000 --- a/ansible/roles/elastic.beats/test/integration/standard/serverspec/default_spec.rb +++ /dev/null @@ -1,39 +0,0 @@ -require 'spec_helper' - -describe 'Standard Tests' do - - describe service('filebeat') do - it { should be_running } - end - - describe package('filebeat') do - it { should be_installed } - end - - describe file('/etc/filebeat/filebeat.yml') do - it { should be_file } - it { should be_owned_by 'root' } - end - - describe file('/etc/filebeat/filebeat.yml') do - it { should contain 'filebeat:' } - it { should contain 'logging:' } - it { should contain 'output:' } - end - - describe file('/etc/init.d/filebeat') do - it { should exist } - end - - if os[:family] == 'redhat' - describe command('yum versionlock list | grep filebeat') do - its(:stdout) { should_not match /filebeat/ } - end - elsif ['debian', 'ubuntu'].include?(os[:family]) - describe command('sudo apt-mark showhold | grep filebeat') do - its(:stdout) { should_not match /filebeat/ } - end - end - -end - diff --git a/ansible/roles/elastic.beats/test/matrix.yml b/ansible/roles/elastic.beats/test/matrix.yml deleted file mode 100644 index 
707fc812..00000000 --- a/ansible/roles/elastic.beats/test/matrix.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- -OS: - - ubuntu-1604 - - ubuntu-1804 - - ubuntu-2004 - - debian-8 - - debian-9 - - debian-10 - - centos-7 - - centos-8 - - amazonlinux-2 -TEST_TYPE: - - standard - - standard-6x - - multi - - config - - oss diff --git a/ansible/roles/elastic.beats/vars/Debian.yml b/ansible/roles/elastic.beats/vars/Debian.yml deleted file mode 100644 index 228492fc..00000000 --- a/ansible/roles/elastic.beats/vars/Debian.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -default_file: "/etc/default" -repo_url: "https://artifacts.elastic.co/packages/{{ beats_major_version }}/apt" diff --git a/ansible/roles/elastic.beats/vars/RedHat.yml b/ansible/roles/elastic.beats/vars/RedHat.yml deleted file mode 100644 index 90911eed..00000000 --- a/ansible/roles/elastic.beats/vars/RedHat.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -default_file: "/etc/sysconfig" -repo_url: "https://artifacts.elastic.co/packages/{{ beats_major_version }}/yum" diff --git a/ansible/roles/elastic.beats/vars/main.yml b/ansible/roles/elastic.beats/vars/main.yml deleted file mode 100644 index de38e051..00000000 --- a/ansible/roles/elastic.beats/vars/main.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -# vars file for beats - -beats_package_url: "https://download.elastic.co/beats" -elastic_repo_key: "https://packages.elastic.co/GPG-KEY-elasticsearch" diff --git a/ansible/roles/elnappo.check_mk_agent/.gitignore b/ansible/roles/elnappo.check_mk_agent/.gitignore deleted file mode 100644 index e923f240..00000000 --- a/ansible/roles/elnappo.check_mk_agent/.gitignore +++ /dev/null @@ -1,173 +0,0 @@ -# Created by https://www.gitignore.io - -### OSX ### -.DS_Store -.AppleDouble -.LSOverride - -# Icon must end with two \r -Icon - - -# Thumbnails -._* - -# Files that might appear on external disk -.Spotlight-V100 -.Trashes - -# Directories potentially created on remote AFP share -.AppleDB -.AppleDesktop -Network Trash Folder -Temporary Items -.apdisk - - -### Windows ### -# Windows image file caches -Thumbs.db -ehthumbs.db - -# Folder config file -Desktop.ini - -# Recycle Bin used on file shares -$RECYCLE.BIN/ - -# Windows Installer files -*.cab -*.msi -*.msm -*.msp - -# Windows shortcuts -*.lnk - - -### Linux ### -*~ - -# KDE directory preferences -.directory - -### Python ### -# Byte-compiled / optimized / DLL files -__pycache__/ -*.py[cod] -*$py.class - -# C extensions -*.so - -# Distribution / packaging -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -pip-wheel-metadata/ -share/python-wheels/ -*.egg-info/ -.installed.cfg -*.egg -MANIFEST - -# PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject date/other infos into it. 
-*.manifest -*.spec - -# Installer logs -pip-log.txt -pip-delete-this-directory.txt - -# Unit test / coverage reports -htmlcov/ -.tox/ -.nox/ -.coverage -.coverage.* -.cache -nosetests.xml -coverage.xml -*.cover -.hypothesis/ -.pytest_cache/ - -# Translations -*.mo -*.pot - -# Django stuff: -*.log -local_settings.py -db.sqlite3 - -# Flask stuff: -instance/ -.webassets-cache - -# Scrapy stuff: -.scrapy - -# Sphinx documentation -docs/_build/ - -# PyBuilder -target/ - -# Jupyter Notebook -.ipynb_checkpoints - -# IPython -profile_default/ -ipython_config.py - -# pyenv -.python-version - -# celery beat schedule file -celerybeat-schedule - -# SageMath parsed files -*.sage.py - -# Environments -.env -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# Spyder project settings -.spyderproject -.spyproject - -# Rope project settings -.ropeproject - -# mkdocs documentation -/site - -# mypy -.mypy_cache/ -.dmypy.json -dmypy.json - -# Pyre type checker -.pyre/ - -### Python Patch ### -.venv/ diff --git a/ansible/roles/elnappo.check_mk_agent/.travis.yml b/ansible/roles/elnappo.check_mk_agent/.travis.yml deleted file mode 100644 index 8f9e14de..00000000 --- a/ansible/roles/elnappo.check_mk_agent/.travis.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -language: python -services: docker - -env: - matrix: - - MOLECULE_DISTRO: ubuntu1604 - - MOLECULE_DISTRO: ubuntu1804 - - MOLECULE_DISTRO: debian8 - - MOLECULE_DISTRO: debian9 - - MOLECULE_DISTRO: centos7 - -install: - - pip install molecule docker - -script: - - molecule test - -notifications: - webhooks: https://galaxy.ansible.com/api/v1/notifications/ diff --git a/ansible/roles/elnappo.check_mk_agent/.yamllint b/ansible/roles/elnappo.check_mk_agent/.yamllint deleted file mode 100644 index 3a2255e4..00000000 --- a/ansible/roles/elnappo.check_mk_agent/.yamllint +++ /dev/null @@ -1,13 +0,0 @@ -extends: default - -rules: - braces: - max-spaces-inside: 1 - level: error - brackets: - max-spaces-inside: 1 - level: error - line-length: disable - # NOTE(retr0h): Templates no longer fail this lint rule. - # Uncomment if running old Molecule templates. - # truthy: disable diff --git a/ansible/roles/elnappo.check_mk_agent/LICENSE b/ansible/roles/elnappo.check_mk_agent/LICENSE deleted file mode 100644 index 8af99316..00000000 --- a/ansible/roles/elnappo.check_mk_agent/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 elnappo - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
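For context, the removed `.travis.yml` above ran the role's Molecule scenarios once per distribution through the `MOLECULE_DISTRO` environment variable. A single cell of that matrix could be reproduced locally roughly as follows; this is a minimal sketch that assumes Docker is available and that the role's `molecule.yml` (not shown in this diff) reads `MOLECULE_DISTRO`, as the deleted CI config implies:

```bash
# Minimal sketch: reproduce one cell of the removed Travis/Molecule matrix.
# Assumes Docker is running and the shell is in the role's checkout directory.
pip install molecule docker                 # same dependencies the deleted .travis.yml installed
MOLECULE_DISTRO=ubuntu1804 molecule test    # one distro value from the deleted env matrix
```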
- diff --git a/ansible/roles/elnappo.check_mk_agent/README.md b/ansible/roles/elnappo.check_mk_agent/README.md deleted file mode 100644 index 92453c16..00000000 --- a/ansible/roles/elnappo.check_mk_agent/README.md +++ /dev/null @@ -1,79 +0,0 @@ -# ansible-role-check-mk-agent -[![Build Status](https://travis-ci.org/elnappo/ansible-role-check-mk-agent.svg?branch=master)](https://travis-ci.org/elnappo/ansible-role-check-mk-agent) [![Ansible Galaxy](https://img.shields.io/badge/galaxy-elnappo.check--mk--agent-blue.svg?style=flat)](https://galaxy.ansible.com/elnappo/check-mk-agent/) - -Installs check mk\_agent. Run it with systemd-socket, SSH with sudo or SSH as root (default). Get more information about check\_mk at [https://mathias-kettner.de/check_mk.html]() - -## Features -* Install check_mk agent -* Query check_mk agent over systemd-socket (only with check_mk_agent >= v1.4), SSH as root or SSH with sudo -* Setup firewall if systemd-socket ist used (ufw or firewalld) -* Add SSH host key to check_mk server -* Install check_mk agent plugins/local checks and their dependencies -* **Add hosts to check_mk server via WATO API** - -## Requirements -* Python requests >= v2.5.0 - -Tested on Ubuntu 16.04, 18.04 and CentOS 7, should also run under Debian and RedHat. - -## Install - $ ansible-galaxy install elnappo.check_mk_agent - -## Role Variables -* `check_mk_agent_over_ssh: True` -* `check_mk_agent_with_sudo: False` Adds a user which is allowed to run check_mk_agent with sudo -* `check_mk_agent_add_host_pubkey: False` Import SSH host keys into your check_mk servers known_hosts file -* `check_mk_agent_monitoring_host:` Hostname of your check_mk server -* `check_mk_agent_monitoring_user:` Username under which your check_mk instance runs -* `check_mk_agent_plugins_requirements: []` Requirements for extra plugins -* `check_mk_agent_plugins: []` List of extra plugins to install -* `check_mk_agent_local_checks: {}` -* `check_mk_agent_pubkey_file:` Path to SSH pubkey file -* `check_mk_agent_add_to_wato: False` -* `check_mk_agent_monitoring_host_folder: ""` -* `check_mk_agent_monitoring_host_discovery_mode: new` -* `check_mk_agent_monitoring_host_url:` -* `check_mk_agent_monitoring_host_wato_username:` -* `check_mk_agent_monitoring_host_wato_secret:` -* `check_mk_agent_setup_firewall: True` Add firewall rule (ufw/firewalld) when using systemd-socket -* `check_mk_agent_manual_install: False` Leave agent package installation to the user - -## Included check_mk extra plugins -Could be found under `files/plugins/`. As it is hard to keep these plugins -up-to-date, these will be removed in a future version from the repository. - - -## Dependencies -None. 
- -## Example Playbook - -```yaml -- hosts: servers - vars: - check_mk_agent_pubkey_file: omd_rsa.pub - check_mk_agent_add_host_pubkey: True - check_mk_agent_monitoring_host: checkmk.example.com - check_mk_agent_monitoring_user: monitoring - check_mk_agent_add_to_wato: True - check_mk_agent_monitoring_host_url: http://cmk.example.com/monitoring/ - check_mk_agent_monitoring_host_wato_username: ansible - check_mk_agent_monitoring_host_wato_secret: 7JTuBt6nETYHG1GS - check_mk_agent_local_checks: - filecount: - src: files/check_mk_local_checks/filecount - cache_time: 600 - filestat: - src: files/check_mk_local_checks/filestat - - roles: - - elnappo.check_mk_agent -``` - -## License - -MIT - -## Author Information - -elnappo diff --git a/ansible/roles/elnappo.check_mk_agent/defaults/main.yml b/ansible/roles/elnappo.check_mk_agent/defaults/main.yml deleted file mode 100644 index ddc54ff3..00000000 --- a/ansible/roles/elnappo.check_mk_agent/defaults/main.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -# defaults file for check_mk agent -check_mk_agent_over_ssh: true -check_mk_agent_with_sudo: false -check_mk_agent_add_host_pubkey: false -check_mk_agent_plugins_requirements: [] -check_mk_agent_plugins: [] -check_mk_agent_local_checks: {} -check_mk_agent_pubkey_file: -check_mk_agent_add_to_wato: false -check_mk_agent_monitoring_host_folder: "" -check_mk_agent_monitoring_host_discovery_mode: "new" -check_mk_agent_setup_firewall: true -check_mk_agent_manual_install: false diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/README b/ansible/roles/elnappo.check_mk_agent/files/plugins/README deleted file mode 100644 index cd89b2c4..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/README +++ /dev/null @@ -1,17 +0,0 @@ -These plugins can be installed in the plugins directory of the Linux agent -in /usr/lib/check_mk_agent/plugins/. Please only install the plugins that -you really need. - -If you want a plugin to be run asynchronously and also in -a larger interval then the normal check interval, then you can -copy it to a subdirectory named after a number of *minutes*, -e.g.: - -/usr/lib/check_mk_agent/plugins/60/mk_zypper - -In that case the agent will: - - - Run this plugin in the background and wait not for it to finish. - - Store the result of the plugin in a cache file below /etc/check_mk/cache. - - Use that file for one hour before running the script again. - diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/apache_status b/ansible/roles/elnappo.check_mk_agent/files/plugins/apache_status deleted file mode 100755 index c97e8bc0..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/apache_status +++ /dev/null @@ -1,170 +0,0 @@ -#!/usr/bin/python -# -*- encoding: utf-8; py-indent-offset: 4 -*- -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. 
check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -# Check_MK-Agent-Plugin - Apache Server Status -# -# Fetches the server-status page from detected or configured apache -# processes to gather status information about this apache process. -# -# To make this agent plugin work you have to load the status_module -# into your apache process. It is also needed to enable the "server-status" -# handler below the URL "/server-status". -# -# By default this plugin tries to detect all locally running apache processes -# and to monitor them. If this is not good for your environment you might -# create an apache_status.cfg file in MK_CONFDIR and populate the servers -# list to prevent executing the detection mechanism. -# -# It is also possible to override or extend the ssl_ports variable to make the -# check contact other ports than 443 with HTTPS requests. - -import os, sys, urllib2, re, socket - -config_dir = os.getenv("MK_CONFDIR", "/etc/check_mk") -config_file = config_dir + "/apache_status.conf" - -if not os.path.exists(config_file): - config_file = config_dir + "/apache_status.cfg" - -# We have to deal with socket timeouts. Python > 2.6 -# supports timeout parameter for the urllib2.urlopen method -# but we are on a python 2.5 system here which seem to use the -# default socket timeout. We are local here so set it to 1 second. -socket.setdefaulttimeout(5.0) - -# None or list of (proto, ipaddress, port) tuples. -# proto is 'http' or 'https' -servers = None -ssl_ports = [ 443, ] - -if os.path.exists(config_file): - execfile(config_file) - - -def try_detect_servers(): - results = [] - - for line in os.popen('netstat -tlnp 2>/dev/null').readlines(): - parts = line.split() - # Skip lines with wrong format - if len(parts) < 7 or '/' not in parts[6]: - continue - - pid, proc = parts[6].split('/', 1) - to_replace = re.compile('^.*/') - proc = to_replace.sub('', proc) - - procs = [ 'apache2', 'httpd', 'httpd2-prefork', 'httpd2-worker', 'httpd.worker', 'fcgi-pm' ] - # the pid/proc field length is limited to 19 chars. Thus in case of - # long PIDs, the process names are stripped of by that length. - # Workaround this problem here - procs = [ p[:19 - len(pid) - 1] for p in procs ] - - # Skip unwanted processes - if proc not in procs: - continue - - address, port = parts[3].rsplit(':', 1) - port = int(port) - - # Use localhost when listening globally - if address == '0.0.0.0': - address = '127.0.0.1' - elif address == '::': - address = '[::1]' - elif ':' in address: - address = '[%s]' % address - - # Switch protocol if port is SSL port. 
In case you use SSL on another - # port you would have to change/extend the ssl_port list - if port in ssl_ports: - proto = 'https' - else: - proto = 'http' - - results.append((proto, address, port)) - - return results - - -if servers is None: - servers = try_detect_servers() - - -if not servers: - sys.exit(0) - - -sys.stdout.write('<<>>\n') -for server in servers: - if isinstance(server, tuple): - proto, address, port = server - page = 'server-status' - else: - proto = server['protocol'] - address = server['address'] - port = server['port'] - page = server.get('page', 'server-status') - - portspec = port and ":%d" % port or "" - - try: - url = '%s://%s%s/%s?auto' % (proto, address, portspec, page) - # Try to fetch the status page for each server - try: - request = urllib2.Request(url, headers={"Accept" : "text/plain"}) - fd = urllib2.urlopen(request) - except urllib2.URLError, e: - if 'unknown protocol' in str(e): - # HACK: workaround misconfigurations where port 443 is used for - # serving non ssl secured http - url = 'http://%s%s/server-status?auto' % (address, portspec) - fd = urllib2.urlopen(url) - else: - raise - except Exception, e: - if 'doesn\'t match' in str(e): - # HACK: workaround if SSL port is found and localhost is using - # SSL connections but certificate does not match - portspec = ':80' - url = 'http://%s%s/server-status?auto' % (address, portspec) - fd = urllib2.urlopen(url) - else: - raise - - for line in fd.read().split('\n'): - if not line.strip(): - continue - if line.lstrip()[0] == '<': - # Seems to be html output. Skip this server. - break - - sys.stdout.write("%s %s %s\n" % (address, port, line)) - except urllib2.HTTPError, e: - sys.stderr.write('HTTP-Error (%s%s): %s %s\n' % (address, portspec, e.code, e)) - - except Exception, e: - sys.stderr.write('Exception (%s%s): %s\n' % (address, portspec, e)) diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/asmcmd.sh b/ansible/roles/elnappo.check_mk_agent/files/plugins/asmcmd.sh deleted file mode 100755 index 291795a3..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/asmcmd.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/sh -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. 
- -su - griduser -c "asmcmd $@" diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/db2_mem b/ansible/roles/elnappo.check_mk_agent/files/plugins/db2_mem deleted file mode 100755 index 98458046..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/db2_mem +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/sh -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -INSTANCES=$(ps -ef | grep db2sysc | awk '{print $1}' | sort -u | grep -v root) - -if [ "$INSTANCES" ] ; then - echo "<<>>" - for INSTANCE in $INSTANCES; do - echo "Instance $INSTANCE" - su - $INSTANCE -c "db2pd -dbptnmem " | egrep '(Memory Limit|HWM usage)' - done -fi diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/dnsclient b/ansible/roles/elnappo.check_mk_agent/files/plugins/dnsclient deleted file mode 100755 index a3648c0e..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/dnsclient +++ /dev/null @@ -1,48 +0,0 @@ -#!/bin/sh -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -# This check can be used to test the name resolution of a given host -# address using the local resolver of the system this script is -# running on. - -HOSTADDRESSES=mathias-kettner.de - -if [ -e $MK_CONFDIR/dnsclient.cfg ] ; then - . 
$MK_CONFDIR/dnsclient.cfg -fi - -echo "<<>>" -for HOSTADDRESS in $HOSTADDRESSES -do - ADDRESSES=`nslookup $HOSTADDRESS | sed -n -e 1,3d -e '/^Address: *\(.*\)$/s//\1/p'` - if [ ! "$ADDRESSES" ] ; then - STATE=2 - OUTPUT="CRIT - $HOSTADDRESS could not be resolved" - else - STATE=0 - OUTPUT="OK - $HOSTADDRESS resolved into $ADDRESSES" - fi - echo Resolve_$HOSTADDRESS $STATE $OUTPUT -done diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/hpux_lunstats b/ansible/roles/elnappo.check_mk_agent/files/plugins/hpux_lunstats deleted file mode 100755 index 9391e910..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/hpux_lunstats +++ /dev/null @@ -1,92 +0,0 @@ -#!/usr/bin/ksh -# Monitor status of LUNs on HP-UX -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -# Put this file into /usr/lib/check_mk_agent/plugins. Then -# reinventorize your host. -# Actually querying these stats is quite slow since they freshly update -# on each call. If you have a few 1000 luns then this will not work. - -get_stats() -{ - scsimgr get_stat -D $LUN | tr '\=' ':' | grep -e 'STATISTICS FOR LUN' -e 'Bytes' -e 'Total I/Os processed' -e 'I/O failure' -e 'IO failures due -to' - return $? -} - - -# Ex: -#LUN PATH INFORMATION FOR LUN : /dev/pt/pt2 -#World Wide Identifier(WWID) = -#LUN PATH INFORMATION FOR LUN : /dev/rdisk/disk5 -#World Wide Identifier(WWID) = 0x60a98000572d44745634645076556357 -#LUN PATH INFORMATION FOR LUN : /dev/rdisk/disk6 - -get_lun_map() -{ -scsimgr lun_map | egrep '^[[:space:]]*(LUN PATH|World Wide Identifier)' | tr '\=' ':' -} - - -main() -{ -get_lun_map | while read line ; do - descr=$(echo $line | awk -F: '{print $1}') - val=$( echo $line | awk -F: '{print $2}') - case $descr in - LUN*) - if echo $val | grep /dev/rdisk 1>/dev/null; then - DMP=yes - LUN=$val - else - DMP=no - unset LUN - fi - ;; - World*) - if [ $DMP = "yes" ]; then - echo "WWID: $val" - get_stats $LUN - fi - ;; - *) - echo "Fehler:" - echo $line - echo $descr - echo $val - sleep 1 - ;; - esac -done -} - - - -# Verify the system is using new multipath device model. 
-if [ -d /dev/rdisk ] && [ -d /dev/disk ]; then - echo '<<>>' - main -fi - diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/hpux_statgrab b/ansible/roles/elnappo.check_mk_agent/files/plugins/hpux_statgrab deleted file mode 100755 index c592ff18..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/hpux_statgrab +++ /dev/null @@ -1,49 +0,0 @@ -#!/bin/sh -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -# this is for users who compiled statgrab on hp-ux. -# note you'll need a 0.18+ version, from their github page at -# https://github.com/i-scream/libstatgrab -# flags used for compiling - disable documentation, examples and set*id - - -if which statgrab > /dev/null ; then - if statgrab const. cpu. general. mem. page. proc. swap. user. > /tmp/statgrab.$$ 2>/dev/null - then - for s in proc cpu page - do - echo "<<>>" - cat /tmp/statgrab.$$ | grep "^$s\." | cut -d. -f2-99 | sed 's/ *= */ /' - done - - echo '<<>>' - cat /tmp/statgrab.$$ | egrep "^(swap|mem)\." | sed 's/ *= */ /' - - echo '<<>>' - cat /tmp/statgrab.$$ | egrep "^general\.uptime" | sed 's/.* //' - - fi - [ -f /tmp/statgrab.$$ ] && rm -f /tmp/statgrab.$$ -fi diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/isc_dhcpd b/ansible/roles/elnappo.check_mk_agent/files/plugins/isc_dhcpd deleted file mode 100755 index fb29eb47..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/isc_dhcpd +++ /dev/null @@ -1,85 +0,0 @@ -#!/usr/bin/python -# Monitor leases if ISC-DHCPD - -import os, sys, time, re, calendar - -conf_file = None -for path in [ '/etc/dhcpd.conf', '/etc/dhcp/dhcpd.conf', '/usr/local/etc/dhcpd.conf' ]: - if os.path.exists(path): - conf_file = path - break - -leases_file = None -for path in [ - '/var/lib/dhcp/db/dhcpd.leases', - '/var/lib/dhcp/dhcpd.leases', - '/var/lib/dhcpd/dhcpd.leases', # CentOS - ]: - if os.path.exists(path): - leases_file = path - break - -# If no configuration and leases are found, we assume that -# no dhcpd is running. 
-if not conf_file or not leases_file: - sys.exit(0) - -pidof_dhcpd = os.popen("pidof dhcpd").read().strip() -sys.stdout.write('<<>>\n[general]\nPID: %s\n' % pidof_dhcpd) - -sys.stdout.write('[pools]\n') - - -def parse_config(filename): - for line in file(filename): - line = line.strip() - if line.startswith("include"): - included_file = re.search('include\s+"(.*)"', line).group(1) - parse_config(included_file) - elif line.startswith("range"): - sys.stdout.write(line[5:].strip("\t ;") + "\n") - -parse_config(conf_file) - - -# lease 10.1.1.81 { -# starts 3 2015/09/09 11:42:20; -# ends 3 2015/09/09 19:42:20; -# tstp 3 2015/09/09 19:42:20; -# cltt 3 2015/09/09 11:42:20; -# binding state free; -# hardware ethernet a4:5e:60:de:1f:c3; -# uid "\001\244^`\336\037\303"; -# set ddns-txt = "318c69bae8aeae6f8c723e96de933c7149"; -# set ddns-fwd-name = "Sebastians-MBP.dhcp.mathias-kettner.de"; -# } - -sys.stdout.write('[leases]\n') -now = time.time() -ip_address = None -binding_state = None -seen_addresses = set() -for line in file(leases_file): - parts = line.strip().rstrip(";").split() - if not parts: - continue - - if parts[0] == "lease": - ip_address = parts[1] - elif parts[0] == "ends": - if parts[1] != "never": - ends_date_string = parts[2] + " " + parts[3] - ends_date = calendar.timegm(time.strptime(ends_date_string, "%Y/%m/%d %H:%M:%S")) - if ends_date < now: - ip_address = None # skip this address, this lease is outdated - - elif parts[0] == "binding" and parts[1] == "state": - binding_state = parts[2] - - elif parts[0] == "}": - if ip_address and binding_state == "active" and ip_address not in seen_addresses: - sys.stdout.write("%s\n" % ip_address) - seen_addresses.add(ip_address) - ip_address = None - binding_state = None - diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/jar_signature b/ansible/roles/elnappo.check_mk_agent/files/plugins/jar_signature deleted file mode 100755 index 7e887533..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/jar_signature +++ /dev/null @@ -1,52 +0,0 @@ -#!/bin/bash -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -# This agent uses the program "jarsigner" to read ssl certificate -# information of jar files and outputs the information to stdout -# for the Check_MK check. -# We assume that all files in the jar archive are signed with the -# same certificate. So we only deal with the last signed file here. 
- -JAVA_HOME=/home/oracle/bin/jdk_latest_version -JAR_PATH=/home/oracle/fmw/11gR2/as_1/forms/java/*.jar - -# Let user override these defaults in a configuration file -if [ -e $MK_CONFDIR/jar_signature.cfg ] ; then - . $MK_CONFDIR/jar_signature.cfg -fi - -PATH=$JAVA_HOME/bin:$PATH - -echo "<<>>" -for JAR in $JAR_PATH; do - if [ -e "$JAR" ] ; then # avoid entry for '*.jar' - echo "[[[${JAR##*/}]]]" - OUTPUT=$(jarsigner -verify -verbose -certs "$JAR") - LINE=$(echo "$OUTPUT" | grep -n ^s | tail -n1 | cut -d: -f1) - echo "$(echo "$OUTPUT" | tail -n +$LINE)" - echo - fi -done - diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/kaspersky_av b/ansible/roles/elnappo.check_mk_agent/files/plugins/kaspersky_av deleted file mode 100755 index ddb727e0..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/kaspersky_av +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/sh -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -if [ -f /opt/kaspersky/kav4fs/bin/kav4fs-control ] -then - echo "<<>>" - /opt/kaspersky/kav4fs/bin/kav4fs-control --get-stat Update - - echo "<<>>" - /opt/kaspersky/kav4fs/bin/kav4fs-control -Q --get-stat - - echo "<<>>" - /opt/kaspersky/kav4fs/bin/kav4fs-control --get-task-list - -fi - diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/lnx_quota b/ansible/roles/elnappo.check_mk_agent/files/plugins/lnx_quota deleted file mode 100755 index 919417b8..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/lnx_quota +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. 
check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -if type repquota >/dev/null ; then - echo "<<>>" - - # User Quota - for VOL in $(grep -E usr[j]?quota /etc/fstab | tr -s '\t' ' ' | cut -d' ' -f2); do - echo "[[[usr:$VOL]]]" - repquota -up $VOL | tail -n +6 | head -n -2 - done - - # Group Quota - for VOL in $(grep -E grp[j]?quota /etc/fstab | tr -s '\t' ' ' | cut -d' ' -f2); do - echo "[[[grp:$VOL]]]" - repquota -gp $VOL | tail -n +6 | head -n -2 - done -fi diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/lvm b/ansible/roles/elnappo.check_mk_agent/files/plugins/lvm deleted file mode 100755 index 00cd5fec..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/lvm +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash -echo "<<>>" -vgs --units b --nosuffix --noheadings --separator ' ' - -echo "<<>>" -lvs --units b --nosuffix --noheadings --separator '|' - -#echo "<<>>" -#pvs --units b --nosuffix --noheadings --separator ' ' diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/mailman_lists b/ansible/roles/elnappo.check_mk_agent/files/plugins/mailman_lists deleted file mode 100755 index f9ef7987..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/mailman_lists +++ /dev/null @@ -1,55 +0,0 @@ -#!/usr/bin/python -# -*- encoding: utf-8; py-indent-offset: 4 -*- -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -# This Check_MK-Agent plugin gathers information about mailinglists hosted -# by the local mailman instance. - -# Needed if you have located your mailman python modules not in default -# python module paths -import sys -sys.path.append("/usr/local/mailman") -sys.path.append("/usr/lib/mailman") - -# Set to True to filter out all "hidden" mailinglists -only_advertised = True - -from Mailman import Utils, MailList # pylint: disable=import-error - -# 1. 
list memberships -sys.stdout.write('<<>>\n') -total_members = set([]) -for name in sorted(Utils.list_names()): - mlist = MailList.MailList(name, lock=0) - if only_advertised and not mlist.advertised: - continue - - rmembers = mlist.getRegularMemberKeys() - dmembers = mlist.getDigestMemberKeys() - members = rmembers + dmembers - total_members.update(members) - - sys.stdout.write('%s %d\n' % (name, len(members))) -sys.stdout.write('TOTAL %d\n' % len(total_members)) diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_apt b/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_apt deleted file mode 100755 index 31f930ba..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_apt +++ /dev/null @@ -1,65 +0,0 @@ -#!/bin/bash -# Check for APT updates (Debian, Ubuntu) -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. 
- -# TODO: -# Einstellungen: -# - upgrade oder dist-upgrade -# - vorher ein update machen -# Bakery: -# - Bakelet anlegen -# - Async-Zeit einstellbar machen und das Ding immer async laufen lassen -# Check programmieren: -# * Schwellwerte auf Anzahlen -# * Regexen auf Pakete, die zu CRIT/WARN führen -# - Graph malen mit zwei Kurven - -# This variable can either be "upgrade" or "dist-upgrade" -UPGRADE=upgrade -DO_UPDATE=yes - - -function check_apt_update { - if [ "$DO_UPDATE" = yes ] ; then - # NOTE: Even with -qq, apt-get update can output several lines to - # stderr, e.g.: - # - # W: There is no public key available for the following key IDs: - # 1397BC53640DB551 - apt-get update -qq 2> /dev/null - fi - apt-get -o 'Debug::NoLocking=true' -o 'APT::Get::Show-User-Simulation-Note=false' -s -qq "$UPGRADE" | grep -v '^Conf' -} - - -if type apt-get > /dev/null ; then - echo '<<>>' - out=$(check_apt_update) - if [ -z "$out" ]; then - echo "No updates pending for installation" - else - echo "$out" - fi -fi diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_ceph b/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_ceph deleted file mode 100755 index 3cb167e7..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_ceph +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash -# Check for APT updates (Debian, Ubuntu) -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2017 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - - -# Config file must contain: -# USER=client.admin -# KEYRING=/etc/ceph/ceph.client.admin.keyring - - -if [ -e "$MK_CONFDIR/ceph.cfg" ]; then - . $MK_CONFDIR/ceph.cfg -fi - - -if [ ! -z "$USER" ] && [ ! -z "$KEYRING" ]; then - CEPH_CMD="ceph -n $USER --keyring=$KEYRING" - echo "<<>>" - $CEPH_CMD -s -f json-pretty - echo "<<>>" - $CEPH_CMD df detail -fi diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_cups_queues b/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_cups_queues deleted file mode 100755 index 01e60f95..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_cups_queues +++ /dev/null @@ -1,54 +0,0 @@ -#!/bin/bash -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . 
\ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2017 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -if type lpstat > /dev/null 2>&1 ; then - export LC_TIME="en_US.UTF-8" - echo "<<>>" - CPRINTCONF=/etc/cups/printers.conf - if [ -r "$CPRINTCONF" ] ; then - LOCAL_PRINTERS=$(grep -E "<(Default)?Printer .*>" $CPRINTCONF | awk '{print $2}' | sed -e 's/>//') - lpstat -p | while read LINE - do - PRINTER=$(echo "$LINE" | awk '{print $2}') - if echo "$LOCAL_PRINTERS" | grep -q "$PRINTER"; then - echo "$LINE" - fi - done - echo '---' - lpstat -o | while read LINE - do - PRINTER=${LINE%%-*} - if echo "$LOCAL_PRINTERS" | grep -q "$PRINTER"; then - echo "$LINE" - fi - done - else - PRINTER=$(lpstat -p) - echo "$PRINTER" - echo '---' - QUEUE=$(lpstat -o | sort) - echo "$QUEUE" - fi -fi diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_db2.aix b/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_db2.aix deleted file mode 100755 index ed59cc37..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_db2.aix +++ /dev/null @@ -1,231 +0,0 @@ -#!/usr/bin/ksh -# Monitor DB/2 databases on AIX -# $HOME/sqllib/db2profile - -# This script can be called in two ways -# Without any arguments: -# Checks if cache of the instances is up to date and starts the -# command 'mk_db.aix query {instance}' if applicable -# If its outdated the script calls itself with the argument 'query' -# With 'query {instance}' as argument: -# Does the actual queries to the db2 instance and writes this info -# into the cache file -# Each instance has its own cache file and all of them are filled in parallel - -if [ ! "$MK_CONFDIR" ] ; then - echo "MK_CONFDIR not set!" >&2 - exit 1 -fi - -if [ ! "$MK_VARDIR" ] ; then - export MK_VARDIR=$MK_CONFDIR -fi - -function waitmax -{ - TIMEOUT=${1}0 - SIGNAL=9 - shift - - # Run command in background - if [ "${#}" -ge 1 ] ; then - ksh -c "$*" & - else - TEST=$(cat) - ksh -c "$TEST" & - fi - - PID=$! - - # Wait for termination within TIMOUT seconds - while [ $TIMEOUT -gt 0 ] - do - TIMEOUT=$((TIMEOUT - 1)) - if [ ! -e /proc/$PID ] ; then - return 0 - fi - perl -e "select(undef, undef, undef, 0.1);" - done - - # Process did not terminate in time. Kill and - # return with an error - kill -9 $PID - return 255 -} - -function query_instance { - INSTANCE=$1 - # find home directory - HOMEDIR=$(grep "^$INSTANCE" /etc/passwd | awk -F: '{print $6}' | grep "$INSTANCE$") - NOW=$(perl -e "print time();") - - waitmax 200 << WAITMAX - su $INSTANCE << EOF - - if [ ! -f $HOMEDIR/sqllib/db2profile ] ; - then - exit 0 - fi - - . 
$HOMEDIR/sqllib/db2profile >/dev/null 2>&1 ; - - - function compare_version_greater_equal { - GREATER_ONE=\\\$(echo "\\\$1 \\\$2" | awk "{if (\\\$1 >= \\\$2) print \\\$1; else print \\\$2}") - if [ \\\$GREATER_ONE == \\\$1 ] ; then - return 0 - else - return 1 - fi - } - - echo '<<>>' - DBVERSION=\\\$(db2 get snapshot for dbm | grep -e 'Product name' -e 'Service level' | awk -v FS='=' '{print \\\$2}' | sed 'N;s/\n/,/g' | sed 's/ //g') - echo $INSTANCE \\\$DBVERSION - VERSION_NUMBER=\\\$(echo \\\$DBVERSION | sed -e 's/DB2v\\\(.*\),.*/\\\1/' | awk -v FS="." '{print \\\$1"."\\\$2}') - - DBS=\\\$(db2 list database directory on $HOMEDIR | grep 'Database name' | awk '{ print \\\$NF }') - - GET_PORT=1 - DB_PORT='port 0' - for DB in \\\$DBS; do - db2 connect to \\\$DB > /dev/null; - if [ $? -nq 0 ] ; then - exit 1 - fi - - if [ 1 -eq \\\$GET_PORT ] ; then - # Each database in an instance has the same port information - db2_tcp_service=\\\$(db2 -x get dbm cfg | grep $INSTANCE | grep "TCP/IP Service" | awk -v FS='=' '{print \\\$2}'|tr -d ' ') - if ( grep \\\$db2_tcp_service /etc/services | grep -q "^\\\$db2_tcp_service " ); then - DB_PORT='port '\\\$(grep \\\$db2_tcp_service /etc/services | grep "^\\\$db2_tcp_service " | awk '{print \\\$2}' | awk -v FS="/" '{print \\\$1}') - fi - GET_PORT=0 - fi - - echo "<<>>" - echo "[[[$INSTANCE:\\\$DB]]]" - db2 "SELECT tbsp_name, tbsp_type, tbsp_state, tbsp_usable_size_kb, tbsp_total_size_kb, tbsp_used_size_kb, tbsp_free_size_kb FROM sysibmadm.tbsp_utilization WHERE tbsp_type = 'DMS' UNION ALL SELECT tu.tbsp_name, tu.tbsp_type, tu.tbsp_state, tu.tbsp_usable_size_kb, tu.tbsp_total_size_kb, tu.tbsp_used_size_kb, (cu.fs_total_size_kb - cu.fs_used_size_kb) AS tbsp_free_size_kb FROM sysibmadm.tbsp_utilization tu INNER JOIN ( SELECT tbsp_id, 1 AS fs_total_size_kb, 0 AS fs_used_size_kb FROM sysibmadm.container_utilization WHERE (fs_total_size_kb IS NULL OR fs_used_size_kb IS NULL) GROUP BY tbsp_id) cu ON (tu.tbsp_type = 'SMS' AND tu.tbsp_id = cu.tbsp_id) UNION ALL SELECT tu.tbsp_name, tu.tbsp_type, tu.tbsp_state, tu.tbsp_usable_size_kb, tu.tbsp_total_size_kb, tu.tbsp_used_size_kb, (cu.fs_total_size_kb - cu.fs_used_size_kb) AS tbsp_free_size_kb FROM sysibmadm.tbsp_utilization tu INNER JOIN ( SELECT tbsp_id, SUM(fs_total_size_kb) AS fs_total_size_kb, SUM(fs_used_size_kb) AS fs_used_size_kb FROM sysibmadm.container_utilization WHERE (fs_total_size_kb IS NOT NULL AND fs_used_size_kb IS NOT NULL) GROUP BY tbsp_id) cu ON (tu.tbsp_type = 'SMS' AND tu.tbsp_id = cu.tbsp_id)" | awk '{print \\\$1" "\\\$2" "\\\$3" "\\\$4" "\\\$5" "\\\$6" "\\\$7}' | sed -e '/^[ ]*$/d' -e '/^-/d' -e '/selected/d' - - echo "<<>>" - echo "TIMESTAMP $NOW" - cat \\\$(db2 get dbm cfg|grep "Default database path"|awk -v FS="=" '{print \\\$2"/sqllib/db2nodes.cfg"}'|tr -d ' ') | sed "s/\(.*\)/$INSTANCE:\\\$DB node \1/" - db2 -x "SELECT deadlocks from sysibmadm.snapdb" | tr -d ' ' | sed "s/\(.*\)/$INSTANCE:\\\$DB deadlocks \1/" - db2 -x "SELECT lock_waits from sysibmadm.snapdb" | tr -d ' ' | sed "s/\(.*\)/$INSTANCE:\\\$DB lockwaits \1/" - db2 -x "SELECT sort_overflows from sysibmadm.snapdb" | tr -d ' ' | sed "s/\(.*\)/$INSTANCE:\\\$DB sortoverflows \1/" - - echo "<<>>" - echo "[[[$INSTANCE:\\\$DB]]]" - echo "TIMESTAMP $NOW" - cat \\\$(db2 get dbm cfg|grep "Default database path"|awk -v FS="=" '{print \\\$2"/sqllib/db2nodes.cfg"}'|tr -d ' ') | sed 's/\(.*\)/node \1/' - db2 -x "SELECT 'usedspace', total_log_used from sysibmadm.snapdb" | awk '{print \\\$1" "\\\$2}' - db2 -x "SELECT NAME, VALUE FROM 
SYSIBMADM.DBCFG WHERE NAME IN ('logfilsiz','logprimary','logsecond')"| awk '{print \\\$1" "\\\$2}' - - echo "<<>>" - echo "[[[$INSTANCE:\\\$DB]]]" - echo \\\$DB_PORT - echo "connections " | tr -d '\n' - db2 list applications | grep -v Auth | grep -v Name | sed -e '/^$/d' | wc -l | tr -d ' ' - # TODO: the time command seems to be broken and outputs 1 second steps - ksh -c "time db2 connect to \\\$DB > /dev/null" 2>&1 | grep real | awk '{print "latency "\\\$2}'| sed -e 's/m/:/' -e 's/s//' - - echo "<<>>" - echo "[[[$INSTANCE:\\\$DB]]]" - cat \\\$(db2 get dbm cfg|grep "Default database path"|awk -v FS="=" '{print \\\$2"/sqllib/db2nodes.cfg"}'|tr -d ' ') | sed "s/\(.*\)/node \1/" - db2 "SELECT SUBSTR(BP_NAME,1,14) AS BP_NAME, TOTAL_HIT_RATIO_PERCENT, DATA_HIT_RATIO_PERCENT, INDEX_HIT_RATIO_PERCENT, XDA_HIT_RATIO_PERCENT FROM SYSIBMADM.BP_HITRATIO" | grep -v "selected." | sed -e '/^$/d' -e '/^-/d' - - echo "<<>>" - echo "[[[$INSTANCE:\\\$DB]]]" - db2 -x "get snapshot for database on \\\$DB" | grep -e "^Total sorts" -e "^Sort overflows" | tr -d '=' - - echo "<<>>" - echo "[[[$INSTANCE:\\\$DB]]]" - if compare_version_greater_equal \\\$VERSION_NUMBER 10.5; then - # MON_GET_DATBASE(-2) gets information of all active members - db2 -x "select LAST_BACKUP from TABLE (MON_GET_DATABASE(-2))" | grep -v "selected." | tail -n 1 - else - db2 -x "select SQLM_ELM_LAST_BACKUP from table(SNAPSHOT_DATABASE( cast( null as VARCHAR(255)), cast(null as int))) as ref" | grep -v "selected." | tail -n 1 - fi - - # disconnect from database - db2 connect reset > /dev/null - done -EOF -WAITMAX - return $? -} - -if [ "$1" = "query" ]; then - query_instance $2 - exit $? -else - #### RUN CACHED ##### - function file_age { - /usr/bin/perl -e 'if (! -f $ARGV[0]){die "0000000"};$mtime=(stat($ARGV[0]))[9];print ($^T-$mtime);' "$1" - } - - if [ ! -d $MK_VARDIR/cache ]; then mkdir -p $MK_VARDIR/cache ; fi - - - if [ -e "$MK_VARDIR/cache/mk_db2.aix.cache" ] ; then - rm $MK_VARDIR/cache/mk_db2.aix.cache - fi - INSTANCES=$(ps -ef | grep [d]b2sysc | awk '{print $1 }') - - # Output any section headers - # If no data is available there will be at least the section headers - # This happens when a database is down. In this scenario the db2_version check - # should go CRIT and the other checks go stale - echo "<<>>" - echo "<<>>" - echo "<<>>" - echo "<<>>" - echo "<<>>" - echo "<<>>" - echo "<<>>" - echo "<<>>" - - for INSTANCE in $INSTANCES; do - CACHEFILE="$MK_VARDIR/cache/mk_db2.aix.cache.$INSTANCE" - MAXAGE=300 - - # Check if the creation of the cache takes way to long and delete this file - # The process might have crashed... - # Since the processes are called with waitmax it is very unlikely that - # there are still unwanted processes soiling the system. - if [ -e "$CACHEFILE.new" ] ; then - AGE=$(file_age "$CACHEFILE.new") - if [ $AGE -ge $((MAXAGE * 10)) ] ; then - rm "$CACHEFILE.new" - fi - fi - - # Check if the creation of the cache takes suspiciously long and return - # nothing if the age (access time) of $CACHEFILE.new is twice the MAXAGE - if [ -e "$CACHEFILE.new" ] ; then - AGE=$(file_age "$CACHEFILE.new") - if [ $AGE -ge $((MAXAGE * 2)) ] ; then - return - fi - fi - - # Check if cache file exists and is recent enough - USE_CACHEFILE="" - if [ -s "$CACHEFILE" ] ; then - AGE=$(file_age "$CACHEFILE") - if [ $AGE -le $MAXAGE ] ; then USE_CACHEFILE=1 ; fi - # Output the file in any case, even if it is - # outdated. 
The new file will not yet be available - cat "$CACHEFILE" - fi - - # Cache file outdated and new job not yet running? Start it - if [ -z "$USE_CACHEFILE" -a ! -e "$CACHEFILE.new" ] ; then - echo "set -o noclobber ; exec > \"$CACHEFILE.new\" || exit 1 ; ./$0 query $INSTANCE && mv \"$CACHEFILE.new\" \"$CACHEFILE\" || rm -f \"$CACHEFILE\" \"$CACHEFILE.new\"" | nohup ksh 2>/dev/null & - fi - - done - -fi - -exit 0 diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_db2.linux b/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_db2.linux deleted file mode 100755 index 3d42b4f0..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_db2.linux +++ /dev/null @@ -1,147 +0,0 @@ -#!/bin/bash -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2018 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -# Agent plugin to monitor DB/2 databases on Linux. -# -# Note: The script has to be accessible and executable by the DB/2 -# instance users to function properly. -# -# The script is called in two different modes. Without arguments it: -# -# - outputs the db2_version section, -# - collects all db2 instances and databases, and -# - runs the script for each database as an instance user -# -# With arguments the script queries the information for each database and -# outputs the corresponding sections. - -if [ $# -eq 0 ]; then - if type timeout >/dev/null 2>&1 ; then - function waitmax () { - timeout "$@" - } - fi - - INSTANCES=$(ps -ef | grep "[d]b2sysc" | awk '{print $1 }') - - for INSTANCE in $INSTANCES; do - NOW=$(perl -e "print time();") - - echo "" - echo '<<>>' - DBVERSION=$(su - "${INSTANCE}" -c "db2 get snapshot for dbm" | grep -e 'Product name' -e 'Service level' | awk -v FS='=' '{print $2}' | sed 'N;s/\n/,/g' | sed 's/ //g') - echo "$INSTANCE" "$DBVERSION" - VERSION_NUMBER=$(echo "$DBVERSION" | sed -e 's/DB2v\(.*\),.*/\1/' | awk -v FS="." 
'{print $1"."$2}') - - DBS=$(su - "${INSTANCE}" -c "db2 list active databases" | grep 'Database name' | awk '{ print $NF }') - - DB_PORT='port 0' - GET_PORT=1 - for DB in $DBS; do - if [ 1 -eq $GET_PORT ] ; then - # Each database in an instance has the same port information - db2_tcp_service=$(su - "${INSTANCE}" -c "db2 -x get dbm cfg" | grep "TCP/IP Service" | awk -v FS='=' '{print $2}' | tr -d ' ') - if ( grep "$db2_tcp_service" /etc/services | grep -q "^$db2_tcp_service " ); then - DB_PORT='port '$(grep "$db2_tcp_service" /etc/services | grep "^$db2_tcp_service " | awk '{print $2}' | awk -v FS="/" '{print $1}') - fi - GET_PORT=0 - fi - SCRIPT=$(readlink -f "$0") - waitmax -s 9 10 su - "${INSTANCE}" -c "\"${SCRIPT}\" \"${INSTANCE}\" \"${DB}\" \"${VERSION_NUMBER}\" \"${NOW}\" \"${DB_PORT}\"" - done - done -else - INSTANCE=$1 - DB=$2 - VERSION_NUMBER=$3 - NOW=$4 - DB_PORT=$5 - - function compare_version_greater_equal { - GREATER_ONE=$(echo "$1 $2" | awk "{if ($1 >= $2) print $1; else print $2}") - if [ "$GREATER_ONE" == "$1" ] ; then - return 0 - else - return 1 - fi - return 0 - } - - millis_before=$(date +"%s%3N") - if db2 +o connect to "$DB"; then - millis_after=$(date +"%s%3N") - millis_diff=$(( millis_after - millis_before )) - - echo "<<>>" - echo "[[[$INSTANCE:$DB]]]" - echo "$DB_PORT" - echo "connections " | tr -d '\n' - db2 -x "SELECT count(*)-1 FROM TABLE(mon_get_connection(CAST(NULL AS BIGINT), -2)) AS t" - echo "latency ${millis_diff}" - - echo "<<>>" - echo "[[[$INSTANCE:$DB]]]" - SQL="SELECT tbsp_name, tbsp_type, tbsp_state, tbsp_usable_size_kb, tbsp_total_size_kb, tbsp_used_size_kb, tbsp_free_size_kb FROM sysibmadm.tbsp_utilization WHERE tbsp_type = 'DMS' UNION ALL SELECT tu.tbsp_name, tu.tbsp_type, tu.tbsp_state, tu.tbsp_usable_size_kb, tu.tbsp_total_size_kb, tu.tbsp_used_size_kb, (cu.fs_total_size_kb - cu.fs_used_size_kb) AS tbsp_free_size_kb FROM sysibmadm.tbsp_utilization tu INNER JOIN ( SELECT tbsp_id, 1 AS fs_total_size_kb, 0 AS fs_used_size_kb FROM sysibmadm.container_utilization WHERE (fs_total_size_kb IS NULL OR fs_used_size_kb IS NULL) GROUP BY tbsp_id) cu ON (tu.tbsp_type = 'SMS' AND tu.tbsp_id = cu.tbsp_id) UNION ALL SELECT tu.tbsp_name, tu.tbsp_type, tu.tbsp_state, tu.tbsp_usable_size_kb, tu.tbsp_total_size_kb, tu.tbsp_used_size_kb, (cu.fs_total_size_kb - cu.fs_used_size_kb) AS tbsp_free_size_kb FROM sysibmadm.tbsp_utilization tu INNER JOIN ( SELECT tbsp_id, SUM(fs_total_size_kb) AS fs_total_size_kb, SUM(fs_used_size_kb) AS fs_used_size_kb FROM sysibmadm.container_utilization WHERE (fs_total_size_kb IS NOT NULL AND fs_used_size_kb IS NOT NULL) GROUP BY tbsp_id) cu ON (tu.tbsp_type = 'SMS' AND tu.tbsp_id = cu.tbsp_id)" - db2 "${SQL}" | awk '{print $1" "$2" "$3" "$4" "$5" "$6" "$7}' | sed -e '/^[ ]*$/d' -e '/^-/d' -e '/selected/d' - - echo "<<>>" - echo "TIMESTAMP $NOW" - echo "$INSTANCE:$DB deadlocks " | tr -d '\n' - db2 -x "SELECT deadlocks from sysibmadm.snapdb" | tr -d ' ' - echo "$INSTANCE:$DB lockwaits " | tr -d '\n' - db2 -x "SELECT lock_waits from sysibmadm.snapdb" | tr -d ' ' - echo "$INSTANCE:$DB sortoverflows " | tr -d '\n' - db2 -x "SELECT sort_overflows from sysibmadm.snapdb" | tr -d ' ' - - echo "<<>>" - echo "TIMESTAMP $NOW" - echo "[[[$INSTANCE:$DB]]]" - echo "usedspace " | tr -d '\n' - db2 -x "SELECT total_log_used from sysibmadm.snapdb" | tr -d ' ' - db2 -x "SELECT NAME, VALUE FROM SYSIBMADM.DBCFG WHERE NAME IN ('logfilsiz','logprimary','logsecond')"| awk '{print $1" "$2}' - - echo "<<>>" - echo "[[[$INSTANCE:$DB]]]" - db2 "SELECT 
SUBSTR(BP_NAME,1,14) AS BP_NAME, TOTAL_HIT_RATIO_PERCENT, DATA_HIT_RATIO_PERCENT, INDEX_HIT_RATIO_PERCENT, XDA_HIT_RATIO_PERCENT FROM SYSIBMADM.BP_HITRATIO" | grep -v "selected." | sed -e '/^$/d' -e '/^-/d' - - echo "<<>>" - echo "[[[$INSTANCE:$DB]]]" - db2 -x "get snapshot for database on $DB" | grep -e "^Total sorts" -e "^Sort overflows" | tr -d '=' - - echo "<<>>" - echo "[[[$INSTANCE:$DB]]]" - if compare_version_greater_equal "$VERSION_NUMBER" 10.5; then - # MON_GET_DATBASE(-2) gets information of all active members - db2 -x "select LAST_BACKUP from TABLE (MON_GET_DATABASE(-2))" | grep -v "selected." | tail -n 1 - else - db2 -x "select SQLM_ELM_LAST_BACKUP from table(SNAPSHOT_DATABASE( cast( null as VARCHAR(255)), cast(null as int))) as ref" | grep -v "selected." | tail -n 1 - fi - - # disconnect from database - db2 connect reset > /dev/null - fi -fi - -exit 0 diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_docker_container_piggybacked b/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_docker_container_piggybacked deleted file mode 100755 index 36cf98ca..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_docker_container_piggybacked +++ /dev/null @@ -1,55 +0,0 @@ -#!/bin/bash -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2018 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -# $REMOTE is exported from check_mk_agent.linux - -if type docker > /dev/null 2>&1; then - NODE_NAME=$(docker info --format "{{json .Name}}") - - # For the container status, we want information about *all* containers - for CONTAINER_ID in $(docker container ls -q --all); do - echo "<<<<${CONTAINER_ID}>>>>" - docker inspect "$CONTAINER_ID" \ - --format='{{println "<<>>"}}{{json .State}}{{println}}{{println "<<>>"}}{{println '"$NODE_NAME"'}}{{println "<<>>"}}{{json .Config.Labels}}{{println}}{{println "<<>>"}}{{json .NetworkSettings}}{{println}}' - - if [ "$(docker inspect -f '{{.State.Running}}' "$CONTAINER_ID")" = "true" ]; then - # Is there a regular agent available in the container? Use it! - # - # Otherwise execute the agent of the node in the context of the container. - # Using this approach we should always get at least basic information from - # the container. - # Once it comes to plugins and custom configuration the user needs to use - # a little more complex setup. Have a look at the documentation. 
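
Aside: the comment above relies on Check_MK's piggyback framing, in which everything printed between a "<<<<NAME>>>>" marker and an empty "<<<<>>>>" marker is attributed to the host NAME (here, the container ID) rather than to the Docker node running the agent. The following is a minimal, hypothetical sketch of that framing; the section name and payload are made up for illustration and are not taken from the deleted script.

# Minimal sketch of the piggyback framing used by the plugin above.
# Output between "<<<<NAME>>>>" and "<<<<>>>>" is credited to host NAME
# (the container ID) instead of the node running the agent.
def piggybacked(container_id, sections):
    lines = ["<<<<%s>>>>" % container_id]
    for name, payload in sections.items():
        lines.append("<<<%s>>>" % name)  # ordinary agent section inside the block
        lines.append(payload)
    lines.append("<<<<>>>>")             # close the piggyback block
    return "\n".join(lines)

if __name__ == "__main__":
    # Hypothetical container ID and payload, for illustration only.
    print(piggybacked("3f2a1b9c", {"docker_container_status": '{"Running": true}'}))
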
- AGENT_PATH=$(docker container exec "$CONTAINER_ID" bash -c "type check_mk_agent" 2>/dev/null) || AGENT_PATH= - if [ -n "$AGENT_PATH" ]; then - docker container exec --env "REMOTE=$REMOTE" "$CONTAINER_ID" check_mk_agent - elif docker container exec "$CONTAINER_ID" which bash >/dev/null 2>&1; then - docker container exec --env MK_FROM_NODE=1 --env "REMOTE=$REMOTE" -i "$CONTAINER_ID" bash < "$0" - fi - fi - - echo "<<<<>>>>" - done -fi diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_filehandler b/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_filehandler deleted file mode 100755 index 64668e78..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_filehandler +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash -# -*- encoding: utf-8; py-indent-offset: 4 -*- -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -FILE=/proc/sys/fs/file-nr - -echo '<<>>' -if [ -a $FILE ]; then - cat $FILE -fi - diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_haproxy.freebsd b/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_haproxy.freebsd deleted file mode 100755 index 7c7a0fe7..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_haproxy.freebsd +++ /dev/null @@ -1,5 +0,0 @@ -if [ -r /var/run/haproxy.stat ]; then - echo "<<>>" - echo "show stat" | socat - UNIX-CONNECT:/var/run/haproxy.sock -fi - diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_informix b/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_informix deleted file mode 100755 index 5f83ce61..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_informix +++ /dev/null @@ -1,296 +0,0 @@ -#!/bin/bash -# Monitor status of LUNs on HP-UX -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. 
-# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -# Make ENV-VARs avail for subshells -set -a - -# .--helper--------------------------------------------------------------. -# | _ _ | -# | | |__ ___| |_ __ ___ _ __ | -# | | '_ \ / _ \ | '_ \ / _ \ '__| | -# | | | | | __/ | |_) | __/ | | -# | |_| |_|\___|_| .__/ \___|_| | -# | |_| | -# '----------------------------------------------------------------------' - - -function do_check () { - # $1:section, $2:excludelist - if echo "$2" | grep -qe "${1}"; then - return 1 - else - return 0 - fi -} - - -function sql () { - db="sysmaster" - sqltxt="$1" - dbaccess_par= - export DBDELIMITER="|" - echo "$sqltxt" | dbaccess ${db} -} - - -function set_excludes () { - excludes="" - if [ "$EXCLUDES" = "ALL" ]; then - excludes="$all_sections" - global_exclude=true - elif [ ! -z "$EXCLUDES" ]; then - excludes=$EXCLUDES - global_exclude=true - else - global_exclude=false - fi - - if [ "$global_exclude" = "false" ]; then - excludes_i="EXCLUDES_${1}" - if [ "${!excludes_i}" = "ALL" ]; then - excludes="$all_sections" - elif [ ! -z "${!excludes_i}" ]; then - excludes=${!excludes_i} - fi - fi -} - - -#. -# .--sqls----------------------------------------------------------------. -# | _ | -# | ___ __ _| |___ | -# | / __|/ _` | / __| | -# | \__ \ (_| | \__ \ | -# | |___/\__, |_|___/ | -# | |_| | -# '----------------------------------------------------------------------' - - -all_sections="sessions locks tabextents dbspaces logusage" - - -function informix_status(){ - echo "<<>>" - echo "[[[$INFORMIXSERVER/$SERVERNUM]]]" - $INFORMIXDIR/bin/onstat - >/dev/null 2>&1 - state=$? 
- echo "Status:"$state - $INFORMIXDIR/bin/onstat -g dis - port=$(grep $INFORMIXSERVER /etc/services) - echo "PORT:"$port -} - - -function informix_sessions(){ - echo "<<>>" - echo "[[[$INFORMIXSERVER/$SERVERNUM]]]" - # don't count our own session - sql "select 'SESSIONS', (count(*)-1)::int from syssessions" -} - - -function informix_locks(){ - echo "<<>>" - echo "[[[$INFORMIXSERVER/$SERVERNUM]]]" - # don't count our own session - sql "select 'LOCKS', (count(*)-1)::int, type from syslocks group by type" -} - - -function informix_tabextents(){ - echo "<<>>" - echo "[[[$INFORMIXSERVER/$SERVERNUM]]]" - sql "select first 10 - 'TABEXTENTS', - trim(n.dbsname) db, - trim(n.tabname) tab, - h.nextns extents, - nrows - from sysptnhdr h, systabnames n - where h.partnum = n.partnum - and nrows > 0 - and n.dbsname not in ( 'sysadmin', 'sysuser', 'sysutils', 'sysmaster' ) - and n.tabname not like 'sys%' - order by extents desc" -} - - -function informix_dbspaces(){ - echo "<<>>" - echo "[[[$INFORMIXSERVER/$SERVERNUM]]]" - sql "select - trim(sd.name) || ' DBSPACE', - sd.dbsnum, - sd.is_temp, - sd.flags, - 'CHUNK', - sc.fname, - sc.pagesize, - sc.chksize, - sc.nfree, - sc.flags, - trim(sc.mfname), - sc.mflags - from sysdbspaces sd, syschunks sc - where sd.dbsnum = sc.dbsnum - -- NO SBSPACE CURRENTLY - and sd.is_sbspace = 0 - order by sd.name" -} - - -function informix_logusage(){ - echo "<<>>" - echo "[[[$INFORMIXSERVER/$SERVERNUM]]]" - sql "select 'LOGUSAGE', - number, - sh_pagesize, - size, - used, - flags, - 'is_used:'||is_used, - 'is_current:'||is_current, - 'is_backed_up:'||is_backed_up, - 'is_new:'||is_new, - 'is_archived:'||is_archived, - 'is_temp:'||is_temp, - 'is_pre_dropped:'||is_pre_dropped - from syslogs, sysshmvals - order by number" -} - - -#. -# .--config--------------------------------------------------------------. -# | __ _ | -# | ___ ___ _ __ / _(_) __ _ | -# | / __/ _ \| '_ \| |_| |/ _` | | -# | | (_| (_) | | | | _| | (_| | | -# | \___\___/|_| |_|_| |_|\__, | | -# | |___/ | -# '----------------------------------------------------------------------' - - -# Config opts: -# - oninit-path; Default is empty, which means autodetection: -# ONINIT_PATH= -# - Excluding sections ("status sessions locks tabextents dbspaces logusage"): -# EXCLUDES_INFORMIX_INSTANCE="SECTION SECTION ..." -# EXCLUDES_INFORMIX_INSTANCE=ALL -# EXCLUDES="SECTION SECTION ..." -# EXCLUDES=ALL - - -if [ -f "$MK_CONFDIR/informix.cfg" ]; then - . $MK_CONFDIR/informix.cfg -fi - - -if [ -z "$ONINIT_PATH" -o ! -x "$ONINIT_PATH" ]; then - ONINIT=$(UNIX95=true ps ax | grep oninit | grep -v grep | head -1 | awk '{print $1 " " $5}') - if [ -z "$ONINIT" ]; then - exit 0 - fi - - ONINIT_PATH=${ONINIT#* } - ONINIT_PID=${ONINIT% *} - case "$ONINIT_PATH" in - /*) - ;; - *) # BUG not platform independent! - ONINIT_PATH=$(ls -l /proc/$ONINIT_PID/exe 2>/dev/null| sed 's/.* //') - ;; - esac - - # If not set in config or not found we end up here - if [ -z "$ONINIT_PATH" -o ! -f "$ONINIT_PATH" ]; then - exit 1 - fi -fi - - -#. -# .--main----------------------------------------------------------------. 
-# | _ | -# | _ __ ___ __ _(_)_ __ | -# | | '_ ` _ \ / _` | | '_ \ | -# | | | | | | | (_| | | | | | | -# | |_| |_| |_|\__,_|_|_| |_| | -# | | -# '----------------------------------------------------------------------' - - -for IDSENV in $( export INFORMIXDIR=${ONINIT_PATH%/bin*} - $INFORMIXDIR/bin/onstat -g dis | \ - egrep '^Server[ ]*:|^Server Number[ ]*:|^INFORMIX|^SQLHOSTS|^ONCONFIG' | \ - sed -e 's/Server Number/SERVERNUM/' \ - -e 's/Server/INFORMIXSERVER/' \ - -e 's/SQLHOSTS/INFORMIXSQLHOSTS/' \ - -e 's/[ ]*:[ ]*/=/' | \ - tr '\n' ';' | \ - sed -e 's/;$/\n/' -e 's/;\(INFORMIXSERVER=[^;]*;\)/\n\1/g' - - ) ; do - ( - # Set environment - eval $IDSENV - PATH=$INFORMIXDIR/bin:$PATH - - # try to set them via 'onstat -g env' otherwise - # DB HAS TO BE RUNNING - if [ -z "$INFORMIXSQLHOSTS" -o -z "$ONCONFIG" ]; then - onstat -g env | egrep -e '^INFORMIXSQLHOSTS' \ - -e '^ONCONFIG' | \ - sed -e 's/[ ][ ]*/=/' - fi - - informix_status - - set_excludes $INFORMIXSERVER - - if do_check "sessions" "$excludes"; then - informix_sessions - fi - - if do_check "locks" "$excludes"; then - informix_locks - fi - - if do_check "tabextents" "$excludes"; then - informix_tabextents - fi - - if do_check "dbspaces" "$excludes"; then - informix_dbspaces - fi - - if do_check "logusage" "$excludes"; then - informix_logusage - fi - ) -done - diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_inotify b/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_inotify deleted file mode 100755 index 33fd1bef..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_inotify +++ /dev/null @@ -1,357 +0,0 @@ -#!/usr/bin/python -# -*- encoding: utf-8; py-indent-offset: 4 -*- -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2016 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -import os -import sys -import time -import signal -import ConfigParser - -try: - # TODO: We should probably ship this package. 
- import pyinotify # pylint: disable=import-error -except: - sys.stderr.write("Error: Python plugin pyinotify is not installed") - sys.exit(1) - -def usage(): - sys.stdout.write("Usage: mk_inotify [-g]\n") - sys.stdout.write(" -g: run in foreground\n\n") - -# Available options: -# -g: run in foreground -opt_foreground = False -if len(sys.argv) == 2 and sys.argv[1] == "-g": - opt_foreground = True - -mk_confdir = os.getenv("MK_CONFDIR") or "/etc/check_mk" -mk_vardir = os.getenv("MK_VARDIR") or "/var/lib/check_mk_agent" - -config_filename = mk_confdir + "/mk_inotify.cfg" -configured_paths = mk_vardir + "/mk_inotify.configured" -pid_filename = mk_vardir + "/mk_inotify.pid" - - -config = ConfigParser.SafeConfigParser({}) -if not os.path.exists(config_filename): - sys.exit(0) -config_mtime = os.stat(config_filename).st_mtime -config.read(config_filename) - -# Configurable in Agent Bakery -heartbeat_timeout = config.getint("global", "heartbeat_timeout") -write_interval = config.getint("global", "write_interval") -max_messages_per_interval = config.getint("global", "max_messages_per_interval") -stats_retention = config.getint("global", "stats_retention") -config.remove_section("global") - -def output_data(): - sys.stdout.write("<<>>\n") - if os.path.exists(configured_paths): - sys.stdout.write(file(configured_paths).read()) - - now = time.time() - for dirpath, _unused_dirnames, filenames in os.walk(mk_vardir): - for filename in filenames: - if filename.startswith("mk_inotify.stats"): - try: - the_file = "%s/%s" % (dirpath, filename) - filetime = os.stat(the_file).st_mtime - file_age = now - filetime - if file_age > 5: - sys.stdout.write(file(the_file).read()) - if file_age > stats_retention: - os.unlink(the_file) - except: - pass - break - -# Check if another mk_inotify process is already running -if os.path.exists(pid_filename): - pid = file(pid_filename).read() - proc_cmdline = "/proc/%s/cmdline" % pid - if os.path.exists(proc_cmdline): - cmdline = file(proc_cmdline).read() - cmdline_tokens = cmdline.split("\0") - if "mk_inotify" in cmdline_tokens[1]: - # Another mk_notify process is already running.. - # Simply output the current statistics and exit - output_data() - - # The pidfile is also the heartbeat file for the running process - os.utime(pid_filename, None) - sys.exit(0) - - -# .--Fork----------------------------------------------------------------. -# | _____ _ | -# | | ___|__ _ __| | __ | -# | | |_ / _ \| '__| |/ / | -# | | _| (_) | | | < | -# | |_| \___/|_| |_|\_\ | -# | | -# +----------------------------------------------------------------------+ -# Reaching this point means that no mk_inotify is currently running - -if not opt_foreground: - try: - pid = os.fork() - if pid > 0: - sys.exit(0) - # Decouple from parent environment - os.chdir("/") - os.umask(0) - os.setsid() - - # Close all fd - for fd in range(0, 256): - try: - os.close(fd) - except OSError: - pass - except Exception, e: - sys.stderr.write("Error forking mk_inotify: %s" % e) - - # Save pid of working process. - file(pid_filename, "w").write("%d" % os.getpid()) -#. -# .--Main----------------------------------------------------------------. 
-# | __ __ _ | -# | | \/ | __ _(_)_ __ | -# | | |\/| |/ _` | | '_ \ | -# | | | | | (_| | | | | | | -# | |_| |_|\__,_|_|_| |_| | -# | | -# +----------------------------------------------------------------------+ - -folder_configs = {} # Computed configuration -output = [] # Data to be written to disk -def get_watched_files(): - files = set([]) - for folder, attributes in folder_configs.items(): - for filenames in attributes["monitor_files"].values(): - for filename in filenames: - files.add("configured\tfile\t%s/%s" % (folder, filename)) - if attributes.get("monitor_all"): - files.add("configured\tfolder\t%s" % (folder)) - return files - -def wakeup_handler(signum, frame): - global output - if output: - if opt_foreground: - sys.stdout.write("%s\n" % "\n".join(output)) - sys.stdout.write("%s\n" % "\n".join(get_watched_files())) - else: - filename = "mk_inotify.stats.%d" % time.time() - file("%s/%s" % (mk_vardir, filename), "w").write("\n".join(output)+"\n") - output = [] - - # Check if configuration has changed -> restart - if (config_mtime != os.stat(config_filename).st_mtime): - os.execv(__file__, sys.argv) - - # Exit on various instances - if not opt_foreground: - if not os.path.exists(pid_filename): # pidfile is missing - sys.exit(0) - if time.time() - os.stat(pid_filename).st_mtime > heartbeat_timeout: # heartbeat timeout - sys.exit(0) - if os.getpid() != int(file(pid_filename).read()): # pidfile differs - sys.exit(0) - - update_watched_folders() - signal.alarm(write_interval) - -def do_output(what, event): - if event.dir: - return # Only monitor files - - if len(output) > max_messages_per_interval: - last_message = "warning\tMaximum messages reached: %d per %d seconds" % \ - (max_messages_per_interval, write_interval) - if output[-1] != last_message: - output.append(last_message) - return - - path = event.path - path_config = folder_configs.get(path) - if not path_config: - return # shouldn't happen, maybe on subfolders (not supported) - - filename = os.path.basename(event.pathname) - if what in path_config["monitor_all"] or\ - filename in path_config["monitor_files"].get(what, []): - line = "%d\t%s\t%s" % (time.time(), what, event.pathname) - if map_events[what][1]: # Check if filestats are enabled - try: - stats = os.stat(event.pathname) - line += "\t%d\t%d" % (stats.st_size, stats.st_mtime) - except Exception: - pass - output.append(line) - if opt_foreground: - sys.stdout.write("%s\n" % line) - - -map_events = { - # Mode Mask Report_filestats (currently unused) - "access" : (pyinotify.IN_ACCESS, False), # pylint: disable=no-member - "open" : (pyinotify.IN_OPEN, False), # pylint: disable=no-member - "create" : (pyinotify.IN_CREATE, False), # pylint: disable=no-member - "delete" : (pyinotify.IN_DELETE, False), # pylint: disable=no-member - "modify" : (pyinotify.IN_MODIFY, False), # pylint: disable=no-member - "movedto" : (pyinotify.IN_MOVED_TO, False), # pylint: disable=no-member - "movedfrom": (pyinotify.IN_MOVED_FROM, False), # pylint: disable=no-member - "moveself" : (pyinotify.IN_MOVE_SELF, False), # pylint: disable=no-member -} - -class NotifyEventHandler(pyinotify.ProcessEvent): - def process_IN_MOVED_TO(self, event): - do_output("movedto", event) - - def process_IN_MOVED_FROM(self, event): - do_output("movedfrom", event) - - def process_IN_MOVE_SELF(self, event): - do_output("moveself", event) -# def process_IN_CLOSE_NOWRITE(self, event): -# print "CLOSE_NOWRITE event:", event.pathname -# -# def process_IN_CLOSE_WRITE(self, event): -# print "CLOSE_WRITE event:", event.pathname 
- - def process_IN_CREATE(self, event): - do_output("create", event) - - def process_IN_DELETE(self, event): - do_output("delete", event) - - def process_IN_MODIFY(self, event): - do_output("modify", event) - - def process_IN_OPEN(self, event): - do_output("open", event) - - -# Watch manager -wm = pyinotify.WatchManager() -def update_watched_folders(): - for folder, attributes in folder_configs.items(): - if attributes.get("watch_descriptor"): - if not wm.get_path(attributes["watch_descriptor"].get(folder)): - del attributes["watch_descriptor"] - else: - if os.path.exists(folder): - new_wd = wm.add_watch(folder, attributes["mask"], rec=True) - if new_wd.get(folder) > 0: - attributes["watch_descriptor"] = new_wd - - -def main(): - # Read config - - for section in config.sections(): - section_tokens = section.split("|") - - folder = section_tokens[0] - folder_configs.setdefault(folder, {"add_modes": {}, - "del_modes": {}, - "all_add_modes": set([]), - "all_del_modes": set([])}) - - files = None - if len(section_tokens) > 1: - files = set(section_tokens[1:]) - - add_modes = set([]) - del_modes = set([]) - for key, value in config.items(section): - if key in map_events: - if value == "1": - add_modes.add(key) - else: - del_modes.add(key) - - if files: - for mode in add_modes: - folder_configs[folder]["add_modes"].setdefault(mode, set([])) - folder_configs[folder]["add_modes"][mode].update(files) - for mode in del_modes: - folder_configs[folder]["del_modes"].setdefault(mode, set([])) - folder_configs[folder]["del_modes"][mode].update(files) - else: - folder_configs[folder]["all_add_modes"].update(add_modes) - folder_configs[folder]["all_del_modes"].update(del_modes) - - - # Evaluate config - for folder, attributes in folder_configs.items(): - required_modes = set([]) - for mode in attributes["add_modes"].keys(): - if mode not in attributes["all_del_modes"]: - required_modes.add(mode) - - files_to_monitor = {} - skip_modes = set([]) - for mode in required_modes: - files_to_monitor.setdefault(mode, set([])) - files_to_monitor[mode].update(attributes["add_modes"][mode]) - files_to_monitor[mode] -= attributes["del_modes"].get(mode, set([])) - if not files_to_monitor[mode]: - skip_modes.add(mode) - - attributes["monitor_files"] = files_to_monitor - attributes["monitor_all"] = attributes["all_add_modes"] - attributes["all_del_modes"] - attributes["modes"] = required_modes - skip_modes - - # Determine mask - attributes["mask"] = 0 - for mode in attributes["modes"]: - attributes["mask"] |= map_events[mode][0] - for mode in attributes["monitor_all"]: - attributes["mask"] |= map_events[mode][0] - - update_watched_folders() - if opt_foreground: - import pprint - sys.stdout.write(pprint.pformat(folder_configs)) - - # Save monitored file/folder information specified in mk_inotify.cfg - file(configured_paths, "w").write("\n".join(get_watched_files())+"\n") - - # Event handler - eh = NotifyEventHandler() - notifier = pyinotify.Notifier(wm, eh) - - # Wake up every few seconds, check heartbeat and write data to disk - signal.signal(signal.SIGALRM, wakeup_handler) - signal.alarm(write_interval) - - notifier.loop() - -if __name__ == '__main__': - main() diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_inventory.aix b/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_inventory.aix deleted file mode 100755 index 602d2bac..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_inventory.aix +++ /dev/null @@ -1,69 +0,0 @@ -#!/bin/bash -# 
+------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -# Run and *send* only once every 4 hours -INTERVAL=14400 - -FLAGFILE=$MK_VARDIR/mk_inventory.last.$REMOTE -NOW=$(date +%s) -UNTIL=$((NOW + INTERVAL + 600)) - -#check if flagfile exits -if [ -e "$FLAGFILE" ]; then - LAST_RUN=$(cat $FLAGFILE) -else - #First run of the script - LAST_RUN=0 -fi - -if [ $(( NOW - LAST_RUN )) -ge $INTERVAL ] -then - echo $NOW > $FLAGFILE - - # List of installed AIX packages - if type lslpp >/dev/null; then - echo "<<>>" - lslpp -c -L - fi - - if type oslevel > /dev/null; then - # base level of the system - echo "<<>>" - oslevel - - # list the known service packs on a system - echo "<<>>" - oslevel -sq - fi - - # If you run the prtconf command without any flags, it displays the system model, machine serial, - # processor type, number of processors, processor clock speed, cpu type, total memory size, network information, filesystem - # information, paging space information, and devices information. - if type prtconf >/dev/null ; then - echo "<<>>" - prtconf - fi -fi - diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_inventory.linux b/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_inventory.linux deleted file mode 100755 index 6428a613..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_inventory.linux +++ /dev/null @@ -1,100 +0,0 @@ -#!/bin/bash -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. 
You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -# Run and *send* only once every __ seconds -. $MK_CONFDIR/mk_inventory.cfg 2>/dev/null || true - -# Default to four hours -INTERVAL=${INVENTORY_INTERVAL:-14400} - -FLAGFILE=$MK_VARDIR/mk_inventory.last.$REMOTE -LAST_RUN=$(stat -c %Y $FLAGFILE) -NOW=$(date +%s) -UNTIL=$((NOW + INTERVAL + 600)) - -if [ $(( NOW - LAST_RUN )) -ge $INTERVAL ] -then - touch $FLAGFILE - - # List of DEB packages - if type dpkg-query >/dev/null; then - echo "<<>>" - dpkg-query --show --showformat='${Package}|${Version}|${Architecture}|deb|-|${Summary}|${Status}\n' - fi - - # List of RPM packages in same format - if type rpm >/dev/null; then - echo "<<>>" - rpm -qa --qf '%{NAME}\t%{VERSION}\t%{ARCH}\trpm\t%{RELEASE}\t%{SUMMARY}\t-\n' - fi - - # List Gentoo packages - if type equery >/dev/null; then - echo "<<>>" - equery -C list --format '$category/$name|$fullversion|$mask2|ebuild|Repository $repo|installed' \* | head -n -1 - fi - - # Information about distribution - echo "<<>>" - for f in {/etc/{oracle-release,debian_version,gentoo-release,lsb-release,redhat-release,SuSE-release,os-release},/usr/share/cma/version} ; do - if [ -e $f ] ; then - echo "[[[$f]]]" - tr \\n \| < $f | sed 's/|$//' ; echo - fi - done - - # CPU Information. We need just the first one - if [ -e /proc/cpuinfo ] ; then - echo "<<>>" - sed 's/[[:space:]]*:[[:space:]]*/:/' < /proc/cpuinfo - fi - - # Information about main board, memory, etc. - if type dmidecode >/dev/null ; then - echo "<<>>" - dmidecode -q | sed 's/\t/:/g' - fi - - # Information about kernel architecture - if type uname >/dev/null ; then - echo "<<>>" - uname -m - uname -r - fi - if type lspci > /dev/null ; then - echo "<<>>" - lspci -v -s $(lspci | grep VGA | cut -d" " -f 1) - fi - - # Some networking information - if type ip > /dev/null ; then - echo "<<>>" - ip a - echo "<<>>" - ip r - fi - -fi - diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_inventory.solaris b/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_inventory.solaris deleted file mode 100755 index 302d01ea..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_inventory.solaris +++ /dev/null @@ -1,91 +0,0 @@ -#!/bin/bash -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. 
If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -# Run and *send* only once every __ seconds -. $MK_CONFDIR/mk_inventory.cfg 2>/dev/null || true - -# Default to four hours -INTERVAL=${INVENTORY_INTERVAL:-14400} - -FLAGFILE=$MK_VARDIR/mk_inventory.last.$REMOTE -if [ `uname -r` = "5.10" ]; then - NOW=$(truss /usr/bin/date 2>&1 | grep ^time | awk -F"= " '{print $2}') -else - NOW=`date +%s` -fi -UNTIL=$((NOW + INTERVAL + 600)) - -#check if flagfile exits -if [ -e "$FLAGFILE" ]; then - LAST_RUN=$(cat $FLAGFILE) -else - #First run of the script - LAST_RUN=0 -fi - -if [ $(( NOW - LAST_RUN )) -ge $INTERVAL ] -then - echo $NOW > $FLAGFILE - - echo "<<>>" - uname -X - - if zoneadm list | grep global >/dev/null 2>&1 - then - if type prtdiag > /dev/null; then - echo "<<>>" - if type sneep >/dev/null 2>&1; then - SN=$(sneep -t serial) - else - SN=$(smbios -t SMB_TYPE_SYSTEM | grep 'Serial Number:' | awk '{print substr($0, index($0,$3))}') - fi - echo "SerialNumber: $SN" - prtdiag -v - fi - - if type prtpicl > /dev/null; then - echo "<<>>" - prtpicl -v - fi - fi - - if type psrinfo > /dev/null; then - echo "<<>>" - psrinfo -p -v - fi - - if type pkginfo >/dev/null ; then - echo "<<>>" - pkginfo -l - fi - - echo "<<>>" - ifconfig -a - - echo "<<>>" - netstat -nr - -fi - diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_iptables b/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_iptables deleted file mode 100755 index 42beb34f..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_iptables +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2017 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -# iptables -if type iptables-save > /dev/null -then - echo "<<>>" - # output filter configuration without table name, comments and - # status data, i.e. lines beginning with '*', '#' or ':'. 
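
For clarity, the sed filter described in the comment above drops the table header, chain definitions and comment lines from the iptables-save output, leaving only the rule lines. A rough Python equivalent with made-up sample input:

import re

# Made-up sample of `iptables-save -t filter` output:
sample = [
    "# Generated by iptables-save",
    "*filter",
    ":INPUT ACCEPT [0:0]",
    "-A INPUT -i lo -j ACCEPT",
    "-A INPUT -p tcp --dport 22 -j ACCEPT",
    "COMMIT",
]

# Rough equivalent of the `sed '/^[#*:]/d'` filter used in the script above:
rules = [line for line in sample if not re.match(r"[#*:]", line)]
print("\n".join(rules))  # -> keeps only the two -A rules and COMMIT
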
- iptables-save -t filter | sed '/^[#*:]/d' -fi - diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_jolokia b/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_jolokia deleted file mode 100755 index 6a2e23a0..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_jolokia +++ /dev/null @@ -1,523 +0,0 @@ -#!/usr/bin/python -# -*- encoding: utf-8; py-indent-offset: 4 -*- -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -import urllib2, sys, os, socket, base64, ssl -from httplib import HTTPConnection, HTTPSConnection - -try: - from simplejson import json -except ImportError: - try: - import json - except ImportError: - sys.stdout.write("<<>>\n") - sys.stdout.write("Error: Missing JSON library for Agent Plugin mk_jolokia\n") - exit() - -opt_verbose = '--verbose' in sys.argv -opt_debug = '--debug' in sys.argv - -class PreemptiveBasicAuthHandler(urllib2.HTTPBasicAuthHandler): - """ - sends basic authentication with the first request, - before the server even asks for it - """ - - def http_request(self, req): - url = req.get_full_url() - realm = None - user, pw = self.passwd.find_user_password(realm, url) - if pw: - raw = "%s:%s" % (user, pw) - auth = 'Basic %s' % base64.b64encode(raw).strip() - req.add_unredirected_header(self.auth_header, auth) - return req - - https_request = http_request - -class HTTPSValidatingConnection(HTTPSConnection): - def __init__(self, host, ca_file, key_file, cert_file): - HTTPSConnection.__init__(self, host, key_file=key_file, cert_file=cert_file) - self.__ca_file = ca_file - self.__key_file = key_file - self.__cert_file = cert_file - - def connect(self): - HTTPConnection.connect(self) - if self.__ca_file: - self.sock = ssl.wrap_socket(self.sock, keyfile=self.key_file, certfile=self.cert_file, - ca_certs=self.__ca_file, cert_reqs=ssl.CERT_REQUIRED) - else: - self.sock = ssl.wrap_socket(self.sock, keyfile=self.key_file, certfile=self.cert_file, - ca_certs=self.__ca_file, cert_reqs=ssl.CERT_NONE) - - -class HTTPSAuthHandler(urllib2.HTTPSHandler): - def __init__(self, ca_file, key, cert): - urllib2.HTTPSHandler.__init__(self) - self.__ca_file = ca_file - self.__key = key - self.__cert = cert - - def https_open(self, req): - # do_open expects a class as the first parameter but getConnection will act - # as a facotry function - return self.do_open(self.getConnection, req) - - def getConnection(self, host, timeout): - 
return HTTPSValidatingConnection(host, ca_file=self.__ca_file, - key_file=self.__key, cert_file=self.__cert) - -def fetch_url_get(base_url, path, function): - if path: - url = "%s/%s/%s" % (base_url, function, path) - else: - url = base_url + "/" - - if opt_verbose: - sys.stderr.write("DEBUG: Fetching: %s\n" % url) - try: - json_data = urllib2.urlopen(url).read() - if opt_verbose: - sys.stderr.write("DEBUG: Result: %s\n\n" % json_data) - except Exception, e: - if opt_debug: - raise - sys.stderr.write("ERROR: %s\n" % e) - return [] - return json_data - - -def fetch_url_post(base_url, path, service_url, service_user, service_password, function): - segments = path.split("/") - - data = { - "type": function.upper(), - "mbean": segments[0], - "attribute": segments[1], - "target": { - "url": service_url, - }, - } - if len(segments) > 2: - data["path"] = segments[2] - - if service_user: - data["target"]["user"] = service_user - data["target"]["password"] = service_password - - if opt_verbose: - sys.stderr.write("DEBUG: Fetching: %s\n" % base_url) - try: - json_data = urllib2.urlopen(base_url, data=json.dumps(data)).read() - if opt_verbose: - sys.stderr.write("DEBUG: Result: %s\n\n" % json_data) - except Exception, e: - if opt_debug: - raise - sys.stderr.write("ERROR: %s\n" % e) - return [] - return json_data - -def fetch_var(protocol, server, port, path, suburi, itemspec, service_url, service_user, - service_password, function="read"): - base_url = "%s://%s:%d/%s" % (protocol, server, port, suburi) - - if service_url is not None: - json_data = fetch_url_post(base_url, path, - service_url, service_user, service_password, function) - else: - json_data = fetch_url_get(base_url, path, function) - - try: - obj = json.loads(json_data) - except Exception, e: - sys.stderr.write('ERROR: Invalid json code (%s)\n' % e) - sys.stderr.write(' Response %s\n' % json_data) - return [] - - if obj.get('status', 200) != 200: - sys.stderr.write('ERROR: Invalid response when fetching url %s\n' % base_url) - sys.stderr.write(' Response: %s\n' % json_data) - return [] - - # Only take the value of the object. If the value is an object - # take the first items first value. 
- # {'Catalina:host=localhost,path=\\/test,type=Manager': {'activeSessions': 0}} - if 'value' not in obj: - if opt_verbose: - sys.stderr.write("ERROR: not found: %s\n" % path) - return [] - val = obj.get('value', None) - return make_item_list((), val, itemspec) - -# convert single values into lists of items in -# case value is a 1-levelled or 2-levelled dict -def make_item_list(path, value, itemspec): - if type(value) != dict: - if type(value) == str: - value = value.replace(r'\/', '/') - return [(path, value)] - else: - result = [] - for key, subvalue in value.items(): - # Handle filtering via itemspec - miss = False - while itemspec and '=' in itemspec[0]: - if itemspec[0] not in key: - miss = True - break - itemspec = itemspec[1:] - if miss: - continue - item = extract_item(key, itemspec) - if not item: - item = (key,) - result += make_item_list(path + item, subvalue, []) - return result - -# Example: -# key = 'Catalina:host=localhost,path=\\/,type=Manager' -# itemsepc = [ "path" ] -# --> "/" - -def extract_item(key, itemspec): - path = key.split(":", 1)[-1] - components = path.split(",") - item = () - comp_dict = {} - for comp in components: - parts = comp.split("=") - if len(parts) == 2: - left, right = parts - comp_dict[left] = right - for pathkey in itemspec: - if pathkey in comp_dict: - right = comp_dict[pathkey] - right = right.replace(r'\/', '/') - if '/' in right: - right = '/' + right.split('/')[-1] - item = item + (right,) - return item - - -def fetch_metric(inst, path, title, itemspec, inst_add=None): - values = fetch_var(inst["protocol"], inst["server"], inst["port"], path, - inst["suburi"], itemspec, - inst["service_url"], inst["service_user"], inst["service_password"]) - - for subinstance, value in values: - if not subinstance and not title: - sys.stdout.write("INTERNAL ERROR: %s\n" % value) - continue - - if "threadStatus" in subinstance or "threadParam" in subinstance: - continue - - if len(subinstance) > 1: - item = ",".join((inst["instance"],) + subinstance[:-1]) - elif inst_add is not None: - item = inst["instance"] + "," + inst_add - else: - item = inst["instance"] - if title: - if subinstance: - tit = title + "." 
+ subinstance[-1] - else: - tit = title - else: - tit = subinstance[-1] - - yield (item.replace(" ", "_"), tit, value) - - -def query_instance(inst): - # Prepare user/password authentication via HTTP Auth - password_mngr = urllib2.HTTPPasswordMgrWithDefaultRealm() - if inst.get("password"): - password_mngr.add_password(None, "%s://%s:%d/" % - (inst["protocol"], inst["server"], inst["port"]), inst["user"], inst["password"]) - - handlers = [] - if inst["protocol"] == "https": - if inst["mode"] == 'https' and (inst["client_key"] is None or - inst["client_cert"] is None): - sys.stdout.write('<<>>\n') - sys.stderr.write("ERROR: https set up as authentication method but certificate " - "wasn't provided\n") - return - handlers.append(HTTPSAuthHandler(inst["cert_path"], - inst["client_key"], inst["client_cert"])) - if inst["mode"] == 'digest': - handlers.append(urllib2.HTTPDigestAuthHandler(password_mngr)) - elif inst["mode"] == "basic_preemptive": - handlers.append(PreemptiveBasicAuthHandler(password_mngr)) - elif inst["mode"] == "basic" and inst["protocol"] != "https": - handlers.append(urllib2.HTTPBasicAuthHandler(password_mngr)) - - if handlers: - opener = urllib2.build_opener(*handlers) - urllib2.install_opener(opener) - - # Determine type of server - server_info = fetch_var(inst["protocol"], inst["server"], inst["port"], "", inst["suburi"], - "", None, None, None) - - sys.stdout.write('<<>>\n') - if server_info: - d = dict(server_info) - version = d.get(('info', 'version'), "unknown") - product = d.get(('info', 'product'), "unknown") - if inst.get("product"): - product = inst["product"] - agentversion = d.get(('agent',), "unknown") - sys.stdout.write("%s %s %s %s\n" % (inst["instance"], product, version, agentversion)) - else: - sys.stdout.write("%s ERROR\n" % (inst["instance"],)) - sys.stdout.write('<<>>\n') - sys.stdout.write("%s ERROR\n" % (inst["instance"],)) - return - - - mbean_search_results = {} - - sys.stdout.write('<<>>\n') - # Fetch the general information first - for var in global_vars + specific_vars.get(product, []): - # support old and new configuration format so we stay compatible with older - # configuration files - if len(var) == 3: - path, title, itemspec = var - mbean, path = path.split("/", 1) - do_search = False - else: - mbean, path, title, itemspec, do_search = var - - queries = [] - if do_search: - if mbean in mbean_search_results: - paths = mbean_search_results[mbean] - else: - paths = fetch_var(inst["protocol"], inst["server"], inst["port"], mbean, - inst["suburi"], "", None, None, None, function="search")[0][1] - mbean_search_results[mbean] = paths - - for mbean_exp in paths: - queries.append( (inst, "%s/%s" % (urllib2.quote(mbean_exp), path), path, - itemspec, mbean_exp) ) - else: - queries.append( (inst, mbean + "/" + path, title, itemspec) ) - - for inst, mbean_path, title, itemspec in queries: - try: - for out_item, out_title, out_value in fetch_metric(inst, mbean_path, title, itemspec): - sys.stdout.write("%s %s %s\n" % (out_item, out_title, out_value) ) - except IOError: - return - except socket.timeout: - return - except: - if opt_debug: - raise - # Simply ignore exceptions. 
Need to be removed for debugging - continue - - if custom_vars: - sys.stdout.write('<<>>\n') - for var in custom_vars: - mbean, path, title, itemspec, do_search, value_type = var - queries = [] - if do_search: - if mbean in mbean_search_results: - paths = mbean_search_results[mbean] - else: - paths = fetch_var(inst["protocol"], inst["server"], inst["port"], mbean, - inst["suburi"], "", None, None, None, function="search")[0][1] - mbean_search_results[mbean] = paths - - for mbean_exp in paths: - queries.append( (inst, "%s/%s" % (urllib2.quote(mbean_exp), path), path, - itemspec, mbean_exp) ) - else: - queries.append( (inst, mbean + "/" + path, title, itemspec) ) - - for inst, mbean_path, title, itemspec in queries: - try: - for out_item, out_title, out_value in fetch_metric(inst, mbean_path, title, itemspec): - sys.stdout.write("%s %s %s %s\n" % (out_item, out_title, out_value, value_type) ) - except IOError: - return - except socket.timeout: - return - except: - if opt_debug: - raise - # Simply ignore exceptions. Need to be removed for debugging - continue - - -# Default configuration for all instances -protocol = "http" -server = "localhost" -port = 8080 -user = "monitoring" -password = None -mode = "digest" -suburi = "jolokia" -instance = None -cert_path = "_default" -client_cert = None -client_key = None -service_url = None -service_user = None -service_password = None -product = None - -global_vars = [ - ( "java.lang:type=Memory", "NonHeapMemoryUsage/used", "NonHeapMemoryUsage", [], False), - ( "java.lang:type=Memory", "NonHeapMemoryUsage/max", "NonHeapMemoryMax", [], False), - ( "java.lang:type=Memory", "HeapMemoryUsage/used", "HeapMemoryUsage", [], False), - ( "java.lang:type=Memory", "HeapMemoryUsage/max", "HeapMemoryMax", [], False), - ( "java.lang:type=Threading", "ThreadCount", "ThreadCount", [], False), - ( "java.lang:type=Threading", "DaemonThreadCount", "DeamonThreadCount", [], False), - ( "java.lang:type=Threading", "PeakThreadCount", "PeakThreadCount", [], False), - ( "java.lang:type=Threading", "TotalStartedThreadCount", "TotalStartedThreadCount", [], False), - ( "java.lang:type=Runtime", "Uptime", "Uptime", [], False), - ( "java.lang:type=GarbageCollector,name=*", "CollectionCount", "", [], False), - ( "java.lang:type=GarbageCollector,name=*", "CollectionTime", "", [], False), - ( "java.lang:name=CMS%20Perm%20Gen,type=MemoryPool", "Usage/used", "PermGenUsage", [], False), - ( "java.lang:name=CMS%20Perm%20Gen,type=MemoryPool", "Usage/max", "PermGenMax", [], False), - - ( "net.sf.ehcache:CacheManager=CacheManagerApplication*,*,type=CacheStatistics", "OffHeapHits", "", [], True), - ( "net.sf.ehcache:CacheManager=CacheManagerApplication*,*,type=CacheStatistics", "OnDiskHits", "", [], True), - ( "net.sf.ehcache:CacheManager=CacheManagerApplication*,*,type=CacheStatistics", "InMemoryHitPercentage", "", [], True), - ( "net.sf.ehcache:CacheManager=CacheManagerApplication*,*,type=CacheStatistics", "CacheMisses", "", [], True), - ( "net.sf.ehcache:CacheManager=CacheManagerApplication*,*,type=CacheStatistics", "OnDiskHitPercentage", "", [], True), - ( "net.sf.ehcache:CacheManager=CacheManagerApplication*,*,type=CacheStatistics", "MemoryStoreObjectCount", "", [], True), - ( "net.sf.ehcache:CacheManager=CacheManagerApplication*,*,type=CacheStatistics", "DiskStoreObjectCount", "", [], True), - ( "net.sf.ehcache:CacheManager=CacheManagerApplication*,*,type=CacheStatistics", "CacheMissPercentage", "", [], True), - ( 
"net.sf.ehcache:CacheManager=CacheManagerApplication*,*,type=CacheStatistics", "CacheHitPercentage", "", [], True), - ( "net.sf.ehcache:CacheManager=CacheManagerApplication*,*,type=CacheStatistics", "OffHeapHitPercentage", "", [], True), - ( "net.sf.ehcache:CacheManager=CacheManagerApplication*,*,type=CacheStatistics", "InMemoryMisses", "", [], True), - ( "net.sf.ehcache:CacheManager=CacheManagerApplication*,*,type=CacheStatistics", "OffHeapStoreObjectCount", "", [], True), - ( "net.sf.ehcache:CacheManager=CacheManagerApplication*,*,type=CacheStatistics", "WriterQueueLength", "", [], True), - ( "net.sf.ehcache:CacheManager=CacheManagerApplication*,*,type=CacheStatistics", "WriterMaxQueueSize", "", [], True), - ( "net.sf.ehcache:CacheManager=CacheManagerApplication*,*,type=CacheStatistics", "OffHeapMisses", "", [], True), - ( "net.sf.ehcache:CacheManager=CacheManagerApplication*,*,type=CacheStatistics", "InMemoryHits", "", [], True), - ( "net.sf.ehcache:CacheManager=CacheManagerApplication*,*,type=CacheStatistics", "AssociatedCacheName", "", [], True), - ( "net.sf.ehcache:CacheManager=CacheManagerApplication*,*,type=CacheStatistics", "ObjectCount", "", [], True), - ( "net.sf.ehcache:CacheManager=CacheManagerApplication*,*,type=CacheStatistics", "OnDiskMisses", "", [], True), - ( "net.sf.ehcache:CacheManager=CacheManagerApplication*,*,type=CacheStatistics", "CacheHits", "", [], True), -] - - -specific_vars = { - "weblogic" : [ - ( "*:*", "CompletedRequestCount", None, [ "ServerRuntime" ] , False), - ( "*:*", "QueueLength", None, [ "ServerRuntime" ] , False), - ( "*:*", "StandbyThreadCount", None, [ "ServerRuntime" ] , False), - ( "*:*", "PendingUserRequestCount", None, [ "ServerRuntime" ] , False), - ( "*:Name=ThreadPoolRuntime,*", "ExecuteThreadTotalCount", None, [ "ServerRuntime" ] , False), - ( "*:*", "ExecuteThreadIdleCount", None, [ "ServerRuntime" ] , False), - ( "*:*", "HoggingThreadCount", None, [ "ServerRuntime" ] , False), - ( "*:Type=WebAppComponentRuntime,*", "OpenSessionsCurrentCount", None, [ "ServerRuntime", "ApplicationRuntime" ] , False), - ], - "tomcat" : [ - ( "*:type=Manager,*", "activeSessions,maxActiveSessions", None, [ "path", "context" ] , False), - ( "*:j2eeType=Servlet,name=default,*", "stateName", None, [ "WebModule" ] , False), - # Check not yet working - ( "*:j2eeType=Servlet,name=default,*", "requestCount", None, [ "WebModule" ], False), - ( "*:name=*,type=ThreadPool", "maxThreads", None, [], False), - ( "*:name=*,type=ThreadPool", "currentThreadCount", None, [], False), - ( "*:name=*,type=ThreadPool", "currentThreadsBusy", None, [], False), - # too wide location for addressing the right info - # ( "*:j2eeType=Servlet,*", "requestCount", None, [ "WebModule" ] , False), - ], - "jboss" : [ - ( "*:type=Manager,*", "activeSessions,maxActiveSessions", None, [ "path", "context" ] , False), - ], -} - - -# ( '*:j2eeType=WebModule,name=/--/localhost/-/%(app)s,*/state', None, [ "name" ]), -# ( '*:j2eeType=Servlet,WebModule=/--/localhost/-/%(app)s,name=%(servlet)s,*/requestCount', None, [ "WebModule", "name" ]), -# ( "Catalina:J2EEApplication=none,J2EEServer=none,WebModule=*,j2eeType=Servlet,name=*", None, [ "WebModule", "name" ]), - - -# List of instances to monitor. Each instance is a dict where -# the global configuration values can be overridden. 
-instances = [{}] - -custom_vars = [] - -conffile = os.getenv("MK_CONFDIR", "/etc/check_mk") + "/jolokia.cfg" - -if os.path.exists(conffile): - execfile(conffile) - -if server == "use fqdn": - server = socket.getfqdn() - -if instance == None: - instance = str(port) - -# We have to deal with socket timeouts. Python > 2.6 -# supports timeout parameter for the urllib2.urlopen method -# but we are on a python 2.5 system here which seem to use the -# default socket timeout. We are local here so set it to 1 second. -socket.setdefaulttimeout(1.0) - -# Compute list of instances to monitor. If the user has defined -# instances in his configuration, we will use this (a list -# of dicts). -for inst in instances: - for varname, value in [ - ("protocol", protocol), - ("server", server), - ("port", port), - ("user", user), - ("password", password), - ("mode", mode), - ("suburi", suburi), - ("instance", instance), - ("cert_path", cert_path), - ("client_cert", client_cert), - ("client_key", client_key), - ("service_url", service_url), - ("service_user", service_user), - ("service_password", service_password), - ( "product", product ), - ]: - if varname not in inst: - inst[varname] = value - if not inst["instance"]: - inst["instance"] = str(inst["port"]) - inst["instance"] = inst["instance"].replace(" ", "_") - - if inst.get("server") == "use fqdn": - inst["server"] = socket.getfqdn() - - query_instance(inst) diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_logins b/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_logins deleted file mode 100755 index b4b8b646..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_logins +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -if type who >/dev/null; then - echo "<<>>" - who | wc -l -fi diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_logwatch b/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_logwatch deleted file mode 100755 index db6a6066..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_logwatch +++ /dev/null @@ -1,564 +0,0 @@ -#!/usr/bin/python -# -*- encoding: utf-8; py-indent-offset: 4 -*- -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . 
\ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -# Call with -d for debug mode: colored output, no saving of status - -import sys, os, re, time, glob - - -# .--MEI-Cleanup---------------------------------------------------------. -# | __ __ _____ ___ ____ _ | -# | | \/ | ____|_ _| / ___| | ___ __ _ _ __ _ _ _ __ | -# | | |\/| | _| | |_____| | | |/ _ \/ _` | '_ \| | | | '_ \ | -# | | | | | |___ | |_____| |___| | __/ (_| | | | | |_| | |_) | | -# | |_| |_|_____|___| \____|_|\___|\__,_|_| |_|\__,_| .__/ | -# | |_| | -# +----------------------------------------------------------------------+ -# In case the program crashes or is killed in a hard way, the frozen binary .exe -# may leave temporary directories named "_MEI..." in the temporary path. Clean them -# up to prevent eating disk space over time. - -######################################################################## -############## DUPLICATE CODE WARNING ################################## -### This code is also used in the cmk-update-agent frozen binary ####### -### Any changes to this class should also be made in cmk-update-agent ## -### In the bright future we will move this code into a library ######### -######################################################################## - -class MEIFolderCleaner(object): - def pid_running(self, pid): - import ctypes - kernel32 = ctypes.windll.kernel32 - SYNCHRONIZE = 0x100000 - - process = kernel32.OpenProcess(SYNCHRONIZE, 0, pid) - - if process != 0: - kernel32.CloseHandle(process) - return True - else: - return False - - - def find_and_remove_leftover_folders(self, hint_filenames): - if not hasattr(sys, "frozen"): - return - - import win32file # pylint: disable=import-error - import tempfile - base_path = tempfile.gettempdir() - for f in os.listdir(base_path): - try: - path = os.path.join(base_path, f) - - if not os.path.isdir(path): - continue - - # Only care about directories related to our program - invalid_dir = False - for hint_filename in hint_filenames: - if not os.path.exists(os.path.join(path, hint_filename)): - invalid_dir = True - break - if invalid_dir: - continue - - pyinstaller_tmp_path = win32file.GetLongPathName(sys._MEIPASS).lower() # pylint: disable=no-member - if pyinstaller_tmp_path == path.lower(): - continue # Skip our own directory - - # Extract the process id from the directory and check whether or not it is still - # running. Don't delete directories of running processes! - # The name of the temporary directories is "_MEI". We try to extract the PID - # by stripping of a single digit from the right. In the hope the NR is a single digit - # in all relevant cases. 
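# Illustration (not from the original plugin): for a leftover directory named
# e.g. "_MEI86402", the f[4:-1] slice in the next statement drops the "_MEI"
# prefix and the trailing sequence digit, leaving "8640", which is then treated
# as the PID of the process that created the directory:
#     "_MEI86402"[4:-1]  ->  "8640"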
- pid = int(f[4:-1]) - if self.pid_running(pid): - continue - - shutil.rmtree(path) - except Exception, e: - # TODO: introduce verbose mode for mk_logwatch - pass -#. - -os_type = "linux" -try: - import platform - os_type = platform.system().lower() -except: - pass - -if '-d' in sys.argv[1:] or '--debug' in sys.argv[1:]: - tty_red = '\033[1;31m' - tty_green = '\033[1;32m' - tty_yellow = '\033[1;33m' - tty_blue = '\033[1;34m' - tty_normal = '\033[0m' - debug = True -else: - tty_red = '' - tty_green = '' - tty_yellow = '' - tty_blue = '' - tty_normal = '' - debug = False - -# The configuration file and status file are searched -# in the directory named by the environment variable -# LOGWATCH_DIR. If that is not set, MK_CONFDIR is used. -# If that is not set either, the current directory ist -# used. -logwatch_dir = os.getenv("LOGWATCH_DIR") -if logwatch_dir: - mk_confdir = logwatch_dir - mk_vardir = logwatch_dir -else: - mk_confdir = os.getenv("MK_CONFDIR") or "." - mk_vardir = os.getenv("MK_VARDIR") or os.getenv("MK_STATEDIR") or "." - - -sys.stdout.write("<<>>\n") - -config_filename = mk_confdir + "/logwatch.cfg" -config_dir = mk_confdir + "/logwatch.d/*.cfg" - - -# Determine the name of the state file -# $REMOTE set -> logwatch.state.$REMOTE -# $REMOTE not set and a tty -> logwatch.state.local -# $REMOTE not set and not a tty -> logwatch.state -remote_hostname = os.getenv("REMOTE", "") -remote_hostname = remote_hostname.replace(":", "_") -if remote_hostname != "": - status_filename = "%s/logwatch.state.%s" % (mk_vardir, remote_hostname) -else: - if sys.stdout.isatty(): - status_filename = "%s/logwatch.state.local" % mk_vardir - else: - status_filename = "%s/logwatch.state" % mk_vardir - -# Copy the last known state from the logwatch.state when there is no status_filename yet. -if not os.path.exists(status_filename) and os.path.exists("%s/logwatch.state" % mk_vardir): - import shutil - shutil.copy("%s/logwatch.state" % mk_vardir, status_filename) - -def is_not_comment(line): - if line.lstrip().startswith('#') or \ - line.strip() == '': - return False - return True - -def parse_filenames(line): - return line.split() - -def parse_pattern(level, pattern, line): - if level not in [ 'C', 'W', 'I', 'O' ]: - raise Exception("Invalid pattern line '%s'" % line) - - try: - compiled = re.compile(pattern) - except: - raise Exception("Invalid regular expression in line '%s'" % line) - - return (level, compiled) - -def read_config(): - config_lines = [] - try: - config_lines += [ line.rstrip() for line in filter(is_not_comment, file(config_filename).readlines()) ] - except IOError, e: - if debug: - raise - - # Add config from a logwatch.d folder - for config_file in glob.glob(config_dir): - config_lines += [ line.rstrip() for line in filter(is_not_comment, file(config_file).readlines()) ] - - have_filenames = False - config = [] - cont_list = [] - rewrite_list = [] - - for line in config_lines: - if line[0].isspace(): # pattern line - if not have_filenames: - raise Exception("Missing logfile names") - - level, pattern = line.split(None, 1) - - if level == 'A': - cont_list.append(parse_cont_pattern(pattern)) - elif level == 'R': - rewrite_list.append(pattern) - else: - level, compiled = parse_pattern(level, pattern, line) - # New pattern for line matching => clear continuation and rewrite patterns - cont_list = [] - rewrite_list = [] - # TODO: Fix the code and remove the pragma below! 
- patterns.append((level, compiled, cont_list, rewrite_list)) # pylint: disable=used-before-assignment - - else: # filename line - patterns = [] - cont_list = [] # Clear list of continuation patterns from last file - rewrite_list = [] # Same for rewrite patterns - config.append((parse_filenames(line), patterns)) - have_filenames = True - return config - -def parse_cont_pattern(pattern): - try: - return int(pattern) - except: - try: - return re.compile(pattern) - except: - if debug: - raise - raise Exception("Invalid regular expression in line '%s'" % pattern) - -# structure of statusfile -# # LOGFILE OFFSET INODE -# /var/log/messages|7767698|32455445 -# /var/test/x12134.log|12345|32444355 -def read_status(): - if debug: - return {} - - status = {} - for line in file(status_filename): - # TODO: Remove variants with spaces. rsplit is - # not portable. split fails if logfilename contains - # spaces - inode = -1 - try: - parts = line.split('|') - filename = parts[0] - offset = parts[1] - if len(parts) >= 3: - inode = parts[2] - - except: - try: - filename, offset = line.rsplit(None, 1) - except: - filename, offset = line.split(None, 1) - status[filename] = int(offset), int(inode) - return status - -def save_status(status): - f = file(status_filename, "w") - for filename, (offset, inode) in status.items(): - f.write("%s|%d|%d\n" % (filename, offset, inode)) - -pushed_back_line = None -def next_line(file_handle): - global pushed_back_line - if pushed_back_line != None: - line = pushed_back_line - pushed_back_line = None - return line - else: - try: - line = file_handle.next() - # Avoid parsing of (yet) incomplete lines (when acutal application - # is just in the process of writing) - # Just check if the line ends with a \n. This handles \n and \r\n - if not line.endswith("\n"): - begin_of_line_offset = file_handle.tell() - len(line) - os.lseek(file_handle.fileno(), begin_of_line_offset, 0) - return None - return line - except: - return None - - -def is_inode_cabable(path): - if "linux" in os_type: - return True - elif "windows" in os_type: - volume_name = "%s:\\\\" % path.split(":", 1)[0] - import win32api # pylint: disable=import-error - volume_info = win32api.GetVolumeInformation(volume_name) - volume_type = volume_info[-1] - if "ntfs" in volume_type.lower(): - return True - else: - return False - else: - return False - - -def process_logfile(logfile, patterns): - global pushed_back_line - - # Look at which file offset we have finished scanning - # the logfile last time. If we have never seen this file - # before, we set the offset to -1 - offset, prev_inode = status.get(logfile, (-1, -1)) - try: - file_desc = os.open(logfile, os.O_RDONLY) - if not is_inode_cabable(logfile): - inode = 1 # Create a dummy inode - else: - inode = os.fstat(file_desc)[1] # 1 = st_ino - except: - if debug: - raise - sys.stdout.write("[[[%s:cannotopen]]]\n" % logfile) - return - - sys.stdout.write("[[[%s]]]\n" % logfile) - - # Seek to the current end in order to determine file size - current_end = os.lseek(file_desc, 0, 2) # os.SEEK_END not available in Python 2.4 - status[logfile] = current_end, inode - - # If we have never seen this file before, we just set the - # current pointer to the file end. We do not want to make - # a fuss about ancient log messages... - if offset == -1: - if not debug: - return - else: - offset = 0 - - - # If the inode of the logfile has changed it has appearently - # been started from new (logfile rotation). At least we must - # assume that. 
In some rare cases (restore of a backup, etc) - # we are wrong and resend old log messages - if prev_inode >= 0 and inode != prev_inode: - offset = 0 - - # Our previously stored offset is the current end -> - # no new lines in this file - if offset == current_end: - return # nothing new - - # If our offset is beyond the current end, the logfile has been - # truncated or wrapped while keeping the same inode. We assume - # that it contains all new data in that case and restart from - # offset 0. - if offset > current_end: - offset = 0 - - # now seek to offset where interesting data begins - os.lseek(file_desc, offset, 0) # os.SEEK_SET not available in Python 2.4 - if os_type == "windows": - import io # Available with python 2.6 - import codecs - # Some windows files are encoded in utf_16 - # Peak the first two bytes to determine the encoding... - peak_handle = os.fdopen(file_desc, "rb") - first_two_bytes = peak_handle.read(2) - use_encoding = None - if first_two_bytes == "\xFF\xFE": - use_encoding = "utf_16" - elif first_two_bytes == "\xFE\xFF": - use_encoding = "utf_16_be" - - os.lseek(file_desc, offset, 0) # os.SEEK_SET not available in Python 2.4 - file_handle = io.open(file_desc, encoding = use_encoding) - else: - file_handle = os.fdopen(file_desc) - worst = -1 - outputtxt = "" - lines_parsed = 0 - start_time = time.time() - - while True: - line = next_line(file_handle) - if line == None: - break # End of file - - # Handle option maxlinesize - if opt_maxlinesize != None and len(line) > opt_maxlinesize: - line = line[:opt_maxlinesize] + "[TRUNCATED]\n" - - lines_parsed += 1 - # Check if maximum number of new log messages is exceeded - if opt_maxlines != None and lines_parsed > opt_maxlines: - outputtxt += "%s Maximum number (%d) of new log messages exceeded.\n" % ( - opt_overflow, opt_maxlines) - worst = max(worst, opt_overflow_level) - os.lseek(file_desc, 0, 2) # Seek to end of file, skip all other messages - break - - # Check if maximum processing time (per file) is exceeded. Check only - # every 100'th line in order to save system calls - if opt_maxtime != None and lines_parsed % 100 == 10 \ - and time.time() - start_time > opt_maxtime: - outputtxt += "%s Maximum parsing time (%.1f sec) of this log file exceeded.\n" % ( - opt_overflow, opt_maxtime) - worst = max(worst, opt_overflow_level) - os.lseek(file_desc, 0, 2) # Seek to end of file, skip all other messages - break - - level = "." 
- for lev, pattern, cont_patterns, replacements in patterns: - matches = pattern.search(line[:-1]) - if matches: - level = lev - levelint = {'C': 2, 'W': 1, 'O': 0, 'I': -1, '.': -1}[lev] - worst = max(levelint, worst) - - # Check for continuation lines - for cont_pattern in cont_patterns: - if type(cont_pattern) == int: # add that many lines - for _unused_x in range(cont_pattern): - cont_line = next_line(file_handle) - if cont_line == None: # end of file - break - line = line[:-1] + "\1" + cont_line - - else: # pattern is regex - while True: - cont_line = next_line(file_handle) - if cont_line == None: # end of file - break - elif cont_pattern.search(cont_line[:-1]): - line = line[:-1] + "\1" + cont_line - else: - pushed_back_line = cont_line # sorry for stealing this line - break - - # Replacement - for replace in replacements: - line = replace.replace('\\0', line.rstrip()) + "\n" - for nr, group in enumerate(matches.groups()): - line = line.replace('\\%d' % (nr+1), group) - - break # matching rule found and executed - - color = {'C': tty_red, 'W': tty_yellow, 'O': tty_green, 'I': tty_blue, '.': ''}[level] - if debug: - line = line.replace("\1", "\nCONT:") - if level == "I": - level = "." - if opt_nocontext and level == '.': - continue - outputtxt += "%s%s %s%s\n" % (color, level, line[:-1], tty_normal) - - new_offset = os.lseek(file_desc, 0, 1) # os.SEEK_CUR not available in Python 2.4 - status[logfile] = new_offset, inode - - # output all lines if at least one warning, error or ok has been found - if worst > -1: - sys.stdout.write(outputtxt) - sys.stdout.flush() - - # Handle option maxfilesize, regardless of warning or errors that have happened - if opt_maxfilesize != None and (offset / opt_maxfilesize) < (new_offset / opt_maxfilesize): - sys.stdout.write("%sW Maximum allowed logfile size (%d bytes) exceeded for the %dth time.%s\n" % - (tty_yellow, opt_maxfilesize, new_offset / opt_maxfilesize, tty_normal)) - -try: - # This removes leftover folders which may be generated by crashing frozen binaries - folder_cleaner = MEIFolderCleaner() - folder_cleaner.find_and_remove_leftover_folders(hint_filenames = ["mk_logwatch.exe.manifest"]) -except Exception, e: - sys.stdout.write("ERROR WHILE DOING FOLDER: %s\n" % e) - sys.exit(1) - -try: - config = read_config() -except Exception, e: - if debug: - raise - sys.stdout.write("CANNOT READ CONFIG FILE: %s\n" % e) - sys.exit(1) - -# Simply ignore errors in the status file. In case of a corrupted status file we simply begin -# with an empty status. That keeps the monitoring up and running - even if we might lose a -# message in the extreme case of a corrupted status file. -try: - status = read_status() -except Exception, e: - status = {} - - -logfile_patterns = {} -# The filename line may contain options like 'maxlines=100' or 'maxtime=10' -for filenames, patterns in config: - # Initialize options with default values - opt_maxlines = None - opt_maxtime = None - opt_maxlinesize = None - opt_maxfilesize = None - opt_regex = None - opt_overflow = 'C' - opt_overflow_level = 2 - opt_nocontext = False - try: - options = [ o.split('=', 1) for o in filenames if '=' in o ] - for key, value in options: - if key == 'maxlines': - opt_maxlines = int(value) - elif key == 'maxtime': - opt_maxtime = float(value) - elif key == 'maxlinesize': - opt_maxlinesize = int(value) - elif key == 'maxfilesize': - opt_maxfilesize = int(value) - elif key == 'overflow': - if value not in [ 'C', 'I', 'W', 'O' ]: - raise Exception("Invalid value %s for overflow. 
Allowed are C, I, O and W" % value) - opt_overflow = value - opt_overflow_level = {'C':2, 'W':1, 'O':0, 'I':0}[value] - elif key == 'regex': - opt_regex = re.compile(value) - elif key == 'iregex': - opt_regex = re.compile(value, re.I) - elif key == 'nocontext': - opt_nocontext = True - else: - raise Exception("Invalid option %s" % key) - except Exception, e: - if debug: - raise - sys.stdout.write("INVALID CONFIGURATION: %s\n" % e) - sys.exit(1) - - - for glob_pattern in filenames: - if '=' in glob_pattern: - continue - logfiles = glob.glob(glob_pattern) - if opt_regex: - logfiles = [ f for f in logfiles if opt_regex.search(f) ] - if len(logfiles) == 0: - sys.stdout.write('[[[%s:missing]]]\n' % glob_pattern) - else: - for logfile in logfiles: - logfile_patterns[logfile] = logfile_patterns.get(logfile, []) + patterns - -for logfile, patterns in logfile_patterns.items(): - process_logfile(logfile, patterns) - -if not debug: - save_status(status) diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_logwatch.aix b/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_logwatch.aix deleted file mode 100755 index 8996093c..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_logwatch.aix +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/ksh -# Logfile monitoring for AIX via errpt -# Beware: This Plugin clears the errors after each run, -# but it creates an detailed backup in /var/log/errpt_TIMESTAMP.log - -echo "<<>>" -echo "[[[errorlog]]]" -OUT=$(errpt | awk 'NR>1 { printf "C %s\n", $0 }') -if [[ $OUT != '' ]];then - echo "$OUT" - errpt -a > /var/log/errpt_$(date +%s).log - errclear 0 -fi diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_mongodb b/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_mongodb deleted file mode 100755 index 0d8b4883..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_mongodb +++ /dev/null @@ -1,205 +0,0 @@ -#!/usr/bin/python -# Monitor MongoDB on Linux - -import sys -import time -import pprint -import os -import datetime - -# This agent plugin creates various sections out of the MongoDB server status information. -# Important: 1) If MongoDB runs as single instance the agent data is assigned -# to the host same host where the plugin resides. -# -# 2) If MongoDB is deployed as replica set the agent data is piggybacked -# to a different hostname, name after the replica set name. -# You have to create a new host in the monitoring system matching the -# replica set name, or use the piggyback translation rule to modify the -# hostname according to your needs. - -try: - import pymongo -except ImportError, e: - sys.stderr.write("ERROR: Unable to import pymongo module\n") - sys.exit(2) - -# TODO: might be implemented in the future.. 
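Like the other check_mk agent plugins removed in this diff, mk_mongodb writes its results in the agent's section format: a <<<name>>> header line followed by plain payload rows, plus <<<<hostname>>>> ... <<<<>>>> pairs that piggyback replica-set data onto a different monitored host, as the comment above describes. A minimal Python 3 sketch of that output convention follows; the section names, hostnames and values are invented for illustration and not taken from the plugin:

import sys

def write_section(name, rows):
    # Emit one agent section: a <<<name>>> header followed by one payload row per line.
    sys.stdout.write("<<<%s>>>\n" % name)
    for row in rows:
        sys.stdout.write("%s\n" % row)

def write_piggyback(hostname, name, rows):
    # Wrap a section in <<<<hostname>>>> ... <<<<>>>> so the monitoring server
    # assigns the data to another host (e.g. a replica-set name, per the comment above).
    sys.stdout.write("<<<<%s>>>>\n" % hostname)
    write_section(name, rows)
    sys.stdout.write("<<<<>>>>\n")

# Illustrative calls only -- section names, host and values are invented:
write_section("mongodb_instance", ["mode\tSingle Instance", "version\t4.4.0"])
write_piggyback("rs0", "mongodb_replica", ["primary\tdb1.example.org:27017"])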
-host = None -port = None - -try: - con = pymongo.MongoClient(host, port) - try: - # pylint: disable=no-member - con = pymongo.database_names() - except: - con = pymongo.MongoClient(None, None, read_preference=pymongo.ReadPreference.SECONDARY) - - con.admin.read_preference = pymongo.ReadPreference.SECONDARY - - # if user and passwd: - # db = con["admin"] - # if not db.authenticate(user, passwd): - # sys.exit("Username/Password incorrect") - - server_status = con.admin.command("serverStatus") -except: - sys.stdout.write("<<>>\n") - sys.stdout.write("error\tInstance is down\n") - sys.exit(0) - -server_version = tuple(con.server_info()['version'].split('.')) - -repl_info = server_status.get("repl") - -sys.stdout.write("<<>>\n") -if not repl_info: - sys.stdout.write("mode\tSingle Instance\n") -else: - if repl_info.get("ismaster"): - sys.stdout.write("mode\tPrimary\n") - elif repl_info.get("secondary"): - sys.stdout.write("mode\tSecondary\n") - else: - sys.stdout.write("mode\tArbiter\n") - sys.stdout.write("address\t%s\n" % repl_info["me"]) - -sys.stdout.write("version\t%s\n" % server_status["version"]) -sys.stdout.write("pid\t%s\n" % server_status["pid"]) - -if repl_info: - if not repl_info.get("ismaster"): - sys.exit(0) - sys.stdout.write("<<<<%s>>>>\n" % repl_info["setName"]) - sys.stdout.write("<<>>\n") - sys.stdout.write("primary\t%s\n" % repl_info.get("primary")) - sys.stdout.write("hosts\t%s\n" % " ".join(repl_info.get("hosts"))) - sys.stdout.write("arbiters\t%s\n" % " ".join(repl_info.get("arbiters"))) - - sys.stdout.write("<<>>\n") - sys.stdout.write(pprint.pformat(con.admin.command("replSetGetStatus"))) - -sys.stdout.write("<<>>\n") -for key, value in server_status.get("asserts", {}).items(): - sys.stdout.write("%s %s\n" % (key, value)) - - -sys.stdout.write("<<>>\n") -sys.stdout.write("%s\n" % "\n".join(map(lambda x: "%s %s" % x, server_status["connections"].items()))) - -databases = dict(map(lambda x: (x, {}), con.database_names())) - -for name in databases.keys(): - databases[name]["collections"] = con[name].collection_names() - databases[name]["stats"] = con[name].command("dbstats") - databases[name]["collstats"] = {} - for collection in databases[name]["collections"]: - databases[name]["collstats"][collection] = con[name].command("collstats", collection) - - -sys.stdout.write("<<>>\n") -col = con.config.chunks -for db_name, db_data in databases.items(): - shards = col.distinct("shard") - sys.stdout.write("shardcount %d\n" % len(shards)) - for collection in db_data.get("collections"): - nsfilter = "%s.%s" % (db_name, collection) - sys.stdout.write("nscount %s %s\n" % (nsfilter, col.find({"ns": nsfilter}).count())) - for shard in shards: - sys.stdout.write("shardmatches %s#%s %s\n" % (nsfilter, shard, col.find({"ns": nsfilter, "shard": shard}).count())) - -sys.stdout.write("<<>>\n") -global_lock_info = server_status.get("globalLock") -if global_lock_info: - for what in [ "activeClients", "currentQueue" ]: - if what in global_lock_info: - for key, value in global_lock_info[what].items(): - sys.stdout.write("%s %s %s\n" % (what, key, value)) - -sys.stdout.write("<<>>\n") -sys.stdout.write("average_ms %s\n" % server_status["backgroundFlushing"]["average_ms"]) -sys.stdout.write("last_ms %s\n" % server_status["backgroundFlushing"]["last_ms"]) -sys.stdout.write("flushed %s\n" % server_status["backgroundFlushing"]["flushes"]) - -# Unused -#try: -# if server_version >= tuple("2.4.0".split(".")): -# indexCounters = server_status['indexCounters'] -# else: -# indexCounters = 
server_status['indexCounters']["btree"] -# print "<<>>" -# for key, value in indexCounters.items(): -# print "%s %s" % (key, value) -#except: -# pass - -sys.stdout.write("<<>>\n") -for key, value in server_status["mem"].items(): - sys.stdout.write("%s %s\n" % (key, value)) -for key, value in server_status["extra_info"].items(): - sys.stdout.write("%s %s\n" % (key, value)) - -sys.stdout.write("<<>>\n") -for what in ["opcounters", "opcountersRepl"]: - for key, value in server_status.get(what, {}).items(): - sys.stdout.write("%s %s %s\n" % (what, key, value)) - -sys.stdout.write("<<>>\n") -for dbname, dbdata in databases.items(): - for collname, colldata in dbdata.get("collstats", {}).items(): - for what, value in colldata.items(): - sys.stdout.write("%s\t%s\t%s\t%s\n" % (dbname, collname, what, value)) - -sys.stdout.write("<<>>\n") -sys.stdout.write("[[[MongoDB startupWarnings]]]\n") -startup_warnings = con.admin.command({"getLog": "startupWarnings"}) - -var_dir = os.environ.get("MK_VARDIR") -if var_dir: - state_file = "%s/mongodb.state" % var_dir - last_timestamp = None - output_all = False - - # Supports: Nov 6 13:44:09 - # 2015-10-17T05:35:24 - def get_timestamp(text): - for pattern in [ "%a %b %d %H:%M:%S", - "%Y-%m-%dT%H:%M:%S" ]: - try: - result = time.mktime(time.strptime(text, pattern)) - return result - except: - continue - - year_available = False - if os.path.exists(state_file): - last_timestamp = int(file(state_file).read()) - if time.localtime(last_timestamp).tm_year >= 2015: - year_available = True - - # Note: there is no year information in these loglines - # As workaround we look at the creation date (year) of the last statefile - # If it differs and there are new messages we start from the beginning - if not year_available: - statefile_year = time.localtime(os.stat(state_file).st_ctime).tm_year - if time.localtime().tm_year != statefile_year: - output_all = True - - for line in startup_warnings["log"]: - state = "C" - state_index = line.find("]")+2 - if len(line) == state_index or line[state_index:].startswith("** "): - state = "." - - if "** WARNING:" in line: - state = "W" - - if output_all or get_timestamp(line.split(".")[0]) > last_timestamp: - sys.stdout.write("%s %s\n" % (state, line)) - - # update state file - if startup_warnings["log"]: - file(state_file, "w").write("%d" % get_timestamp(startup_warnings["log"][-1].split(".")[0])) - -sys.stdout.write("<<<<>>>>\n") - diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_mysql b/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_mysql deleted file mode 100755 index 72593dad..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_mysql +++ /dev/null @@ -1,92 +0,0 @@ -#!/bin/bash -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. 
check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -# gets optional socket as argument -function do_query() { - INSTANCE=$(echo $1|awk -v FS="=" '{print $2}') - COUNT=$(ps -efww | grep [/]usr/sbin/mysqld | grep socket | wc -l) - if [ $COUNT -gt 1 ] - then - INSTANCE_NAME=$(ps -efww|grep socket|grep "${INSTANCE}"|grep "[u]ser" | sed -ne 's/.*socket=\([^.]*\).*/\1/p') - INSTANCE_NAME="[[${INSTANCE_NAME##*/}]]" - else - INSTANCE_NAME="[[$(ps -efww|grep socket|grep "${INSTANCE}"|grep "[u]ser" | sed -ne 's/.*user=\([^ ]*\).*/\1/p')]]" - fi - - - - # Check if mysqld is running and root password setup - echo "<<>>" - echo $INSTANCE_NAME - mysqladmin --defaults-extra-file=$MK_CONFDIR/mysql.cfg $1 ping 2>&1 - - if [ $? -eq 0 ]; then - - echo "<<>>" - echo $INSTANCE_NAME - mysql --defaults-extra-file=$MK_CONFDIR/mysql.cfg $1 -sN \ - -e "show global status ; show global variables ;" - - echo "<<>>" - echo $INSTANCE_NAME - mysql --defaults-extra-file=$MK_CONFDIR/mysql.cfg $1 -sN \ - -e "SELECT table_schema, sum(data_length + index_length), sum(data_free) - FROM information_schema.TABLES GROUP BY table_schema" - - echo "<<>>" - echo $INSTANCE_NAME - mysql --defaults-extra-file=$MK_CONFDIR/mysql.cfg $1 -s \ - -e "show slave status\G" - - fi - -} - -if which mysqladmin >/dev/null -then - mysql_sockets=$(fgrep socket $MK_CONFDIR/mysql.cfg|sed -ne 's/.*socket=\([^ ]*\).*/\1/p') - if [ -z "$mysql_sockets" ] ; then - mysql_sockets=$(ps -efww | grep mysqld | grep "[s]ocket" | sed -ne 's/.*socket=\([^ ]*\).*/\1/p') - fi - if [ -z "$mysql_sockets" ] ; then - do_query "" - else - for socket in $mysql_sockets ; do - do_query "--socket="$socket - done - fi - #echo "<<>>" - #mysql -V - - echo "<<>>" - ps -efww|grep mysqld|while read LINE; do echo $LINE|grep "[u]ser" | sed -ne 's/.*user=\([^ ]*\).*/\1/p'; echo $LINE|grep mysqld | grep "[p]ort"|sed -ne 's/.*port=\([^ ]*\).*/\1/p' ; done|xargs -n2 - - #echo "<<>>" - #mysql --defaults-extra-file=$MK_CONFDIR/mysql.cfg $1 -s \ - # -e "show INSTANCES" - -fi - - diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_omreport b/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_omreport deleted file mode 100755 index 7960a2b2..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_omreport +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. 
check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -if which omreport >/dev/null -then - echo "<<>>" - omreport storage vdisk -fi diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_oracle b/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_oracle deleted file mode 100755 index 73cd1a98..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_oracle +++ /dev/null @@ -1,1378 +0,0 @@ -#!/bin/bash -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -# Check_MK agent plugin for monitoring ORACLE databases -# This plugin is a result of the common work of Thorsten Bruhns -# and Mathias Kettner. Thorsten is responsible for the ORACLE -# stuff, Mathias for the shell hacking... -# This plugin is available for linux AND solaris. - -# Example for mk_oracle.cfg -# DBUSER=:::: -# ASMUSER=:::: -# -# SYSDBA or SYSASM is optional but needed for a mounted instance -# HOSTNAME is optional - Default is localhost -# PORT is optional - Default is 1521 - -# ONLY_SIDS is only usable for local running instances. It is ignored for -# REMOTE_-Instances. The same applies to EXCLUDE. - -while test $# -gt 0 -do - if [ "${1}" = '-d' ] ; then - set -xv ; DEBUG=1 - elif [ "${1}" = '-t' ] ; then - DEBUGCONNECT=1 - fi - shift -done - -if [ ! "$MK_CONFDIR" ] ; then - echo "MK_CONFDIR not set!" >&2 - exit 1 -fi - -if [ ! "$MK_VARDIR" ] ; then - export MK_VARDIR=$MK_CONFDIR -fi - - -# .--Config--------------------------------------------------------------. 
-# | ____ __ _ | -# | / ___|___ _ __ / _(_) __ _ | -# | | | / _ \| '_ \| |_| |/ _` | | -# | | |__| (_) | | | | _| | (_| | | -# | \____\___/|_| |_|_| |_|\__, | | -# | |___/ | -# +----------------------------------------------------------------------+ -# | The user can override and set variables in mk_oracle.cfg | -# '----------------------------------------------------------------------' - -# Sections that run fast and do no caching -SYNC_SECTIONS="instance sessions logswitches undostat recovery_area processes recovery_status longactivesessions dataguard_stats performance locks" - -# Sections that are run in the background and at a larger interval. -# Note: sections not listed in SYNC_SECTIONS or ASYNC_SECTIONS will not be -# executed at all! -ASYNC_SECTIONS="tablespaces rman jobs ts_quotas resumable" - -# Sections that are run in the background and at a larger interval. -# Note: _ASM_ sections are only executed when SID starts with '+' -# sections listed in SYNC_SECTIONS or ASYNC_SECTIONS are not -# executed for ASM. -SYNC_ASM_SECTIONS="instance processes" -ASYNC_ASM_SECTIONS="asm_diskgroup" - -# Interval for running async checks (in seconds) -CACHE_MAXAGE=600 - -# You can specify a list of SIDs to monitor. Those databases will -# only be handled, if they are found running, though! -# -# ONLY_SIDS="XE ORCL FOO BAR" -# -# It is possible to filter SIDS negatively. Just add the following to -# the mk_oracle.cfg file: -# -# EXCLUDE_="ALL" -# -# Another option is to filter single checks for SIDS. Just add -# lines as follows to the mk_oracle.cfg file. One service per -# line: -# -# EXCLUDE_="" -# -# For example skip oracle_sessions and oracle_logswitches checks -# for the instance "mysid". -# -# EXCLUDE_mysid="sessions logswitches" -# - -# Source the optional configuration file for this agent plugin -if [ -e "$MK_CONFDIR/mk_oracle.cfg" ] -then - . $MK_CONFDIR/mk_oracle.cfg -fi - -#. -# .--SQL Queries---------------------------------------------------------. -# | ____ ___ _ ___ _ | -# | / ___| / _ \| | / _ \ _ _ ___ _ __(_) ___ ___ | -# | \___ \| | | | | | | | | | | |/ _ \ '__| |/ _ \/ __| | -# | ___) | |_| | |___ | |_| | |_| | __/ | | | __/\__ \ | -# | |____/ \__\_\_____| \__\_\\__,_|\___|_| |_|\___||___/ | -# | | -# +----------------------------------------------------------------------+ -# | The following functions create SQL queries for ORACLE and output | -# | them to stdout. All queries output the database name or the instane | -# | name as first column. 
| -# '----------------------------------------------------------------------' - -sql_performance() -{ - if [ "$AT_LEAST_ORACLE_101" = 'yes' ] ; then - echo 'PROMPT <<>>' - echo "select upper(i.INSTANCE_NAME) - ||'|'|| 'sys_time_model' - ||'|'|| S.STAT_NAME - ||'|'|| Round(s.value/1000000) - from v\$instance i, - v\$sys_time_model s - where s.stat_name in('DB time', 'DB CPU') - order by s.stat_name; - select upper(i.INSTANCE_NAME) - ||'|'|| 'buffer_pool_statistics' - ||'|'|| b.name - ||'|'|| b.db_block_gets - ||'|'|| b.db_block_change - ||'|'|| b.consistent_gets - ||'|'|| b.physical_reads - ||'|'|| b.physical_writes - ||'|'|| b.FREE_BUFFER_WAIT - ||'|'|| b.BUFFER_BUSY_WAIT - from v\$instance i, V\$BUFFER_POOL_STATISTICS b; - select upper(i.INSTANCE_NAME) - ||'|'|| 'SGA_info' - ||'|'|| s.name - ||'|'|| s.bytes - from v\$sgainfo s, v\$instance i; - select upper(i.INSTANCE_NAME) - ||'|'|| 'librarycache' - ||'|'|| b.namespace - ||'|'|| b.gets - ||'|'|| b.gethits - ||'|'|| b.pins - ||'|'|| b.pinhits - ||'|'|| b.reloads - ||'|'|| b.invalidations - from v\$instance i, V\$librarycache b;" - fi -} - -sql_tablespaces() -{ - echo 'PROMPT <<>>' - if [ "$AT_LEAST_ORACLE_102" = 'yes' ] ; then - - echo "select upper(decode(${IGNORE_DB_NAME:-0}, 0, d.NAME, i.instance_name)) - || '|' || file_name ||'|'|| tablespace_name ||'|'|| fstatus ||'|'|| AUTOEXTENSIBLE - ||'|'|| blocks ||'|'|| maxblocks ||'|'|| USER_BLOCKS ||'|'|| INCREMENT_BY - ||'|'|| ONLINE_STATUS ||'|'|| BLOCK_SIZE - ||'|'|| decode(tstatus,'READ ONLY', 'READONLY', tstatus) || '|' || free_blocks - ||'|'|| contents - ||'|'|| iversion - from v\$database d , v\$instance i, ( - select f.file_name, f.tablespace_name, f.status fstatus, f.AUTOEXTENSIBLE, - f.blocks, f.maxblocks, f.USER_BLOCKS, f.INCREMENT_BY, - f.ONLINE_STATUS, t.BLOCK_SIZE, t.status tstatus, nvl(sum(fs.blocks),0) free_blocks, t.contents, - (select version from v\$instance) iversion - from dba_data_files f, dba_tablespaces t, dba_free_space fs - where f.tablespace_name = t.tablespace_name - and f.file_id = fs.file_id(+) - group by f.file_name, f.tablespace_name, f.status, f.autoextensible, - f.blocks, f.maxblocks, f.user_blocks, f.increment_by, f.online_status, - t.block_size, t.status, t.contents) - where d.database_role = 'PRIMARY'; - - select upper(decode(${IGNORE_DB_NAME:-0} - , 0, dbf.name - , i.instance_name)) - || '|' || dbf.file_name - || '|' || dbf.tablespace_name - || '|' || dbf.fstatus - || '|' || dbf.AUTOEXTENSIBLE - || '|' || dbf.blocks - || '|' || dbf.maxblocks - || '|' || dbf.USER_BLOCKS - || '|' || dbf.INCREMENT_BY - || '|' || dbf.ONLINE_STATUS - || '|' || dbf.BLOCK_SIZE - || '|' || decode(tstatus,'READ ONLY', 'READONLY', tstatus) - || '|' || dbf.free_blocks - || '|' || 'TEMPORARY' - || '|' || i.version - FROM v\$database d - JOIN v\$instance i ON 1 = 1 - JOIN ( - SELECT vp.name, - f.file_name, - t.tablespace_name, - f.status fstatus, - f.autoextensible, - f.blocks, - f.maxblocks, - f.user_blocks, - f.increment_by, - 'ONLINE' online_status, - t.block_size, - t.status tstatus, - f.blocks - nvl(SUM(tu.blocks),0) free_blocks, - t.contents - FROM dba_tablespaces t - JOIN ( SELECT 0 - ,name - FROM v\$database - ) vp ON 1=1 - LEFT OUTER JOIN dba_temp_files f ON t.tablespace_name = f.tablespace_name - LEFT OUTER JOIN gv\$tempseg_usage tu ON f.tablespace_name = tu.tablespace - AND f.RELATIVE_FNO = tu.SEGRFNO# - WHERE t.contents = 'TEMPORARY' - GROUP BY vp.name, - f.file_name, - t.tablespace_name, - f.status, - f.autoextensible, - f.blocks, - f.maxblocks, - f.user_blocks, - 
f.increment_by, - t.block_size, - t.status, - t.contents - ) dbf ON 1 = 1;" - - elif [ "$AT_LEAST_ORACLE_92" = 'yes' ] ; then - - echo "select upper(decode(${IGNORE_DB_NAME:-0}, 0, d.NAME, i.instance_name)) - || '|' || file_name ||'|'|| tablespace_name ||'|'|| fstatus ||'|'|| AUTOEXTENSIBLE - ||'|'|| blocks ||'|'|| maxblocks ||'|'|| USER_BLOCKS ||'|'|| INCREMENT_BY - ||'|'|| ONLINE_STATUS ||'|'|| BLOCK_SIZE - ||'|'|| decode(tstatus,'READ ONLY', 'READONLY', tstatus) || '|' || free_blocks - ||'|'|| contents - from v\$database d , v\$instance i, ( - select f.file_name, f.tablespace_name, f.status fstatus, f.AUTOEXTENSIBLE, - f.blocks, f.maxblocks, f.USER_BLOCKS, f.INCREMENT_BY, - 'ONLINE' ONLINE_STATUS, t.BLOCK_SIZE, t.status tstatus, nvl(sum(fs.blocks),0) free_blocks, t.contents - from dba_data_files f, dba_tablespaces t, dba_free_space fs - where f.tablespace_name = t.tablespace_name - and f.file_id = fs.file_id(+) - group by f.file_name, f.tablespace_name, f.status, f.autoextensible, - f.blocks, f.maxblocks, f.user_blocks, f.increment_by, 'ONLINE', - t.block_size, t.status, t.contents - UNION - select f.file_name, f.tablespace_name, 'ONLINE' status, f.AUTOEXTENSIBLE, - f.blocks, f.maxblocks, f.USER_BLOCKS, f.INCREMENT_BY, 'TEMP', - t.BLOCK_SIZE, 'TEMP' status, sum(sh.blocks_free) free_blocks, 'TEMPORARY' - from v\$thread th, dba_temp_files f, dba_tablespaces t, v\$temp_space_header sh - WHERE f.tablespace_name = t.tablespace_name and f.file_id = sh.file_id - GROUP BY th.instance, f.file_name, f.tablespace_name, 'ONLINE', - f.autoextensible, f.blocks, f.maxblocks, f.user_blocks, f.increment_by, - 'TEMP', t.block_size, t.status); - " - fi -} - -sql_dataguard_stats() -{ - if [ "$AT_LEAST_ORACLE_102" = 'yes' ] ; then - echo 'PROMPT <<>>' - echo "SELECT upper(decode(${IGNORE_DB_NAME:-0}, 0, d.NAME, i.instance_name)) - ||'|'|| upper(d.DB_UNIQUE_NAME) - ||'|'|| d.DATABASE_ROLE - ||'|'|| ds.name - ||'|'|| ds.value - ||'|'|| d.SWITCHOVER_STATUS - FROM v\$database d - JOIN v\$parameter vp on 1=1 - JOIN v\$instance i on 1=1 - left outer join V\$dataguard_stats ds on 1=1 - WHERE vp.name = 'log_archive_config' - AND vp.value is not null - ORDER BY 1; - " - fi -} - -sql_recovery_status() -{ - echo 'PROMPT <<>>' - if [ "$AT_LEAST_ORACLE_101" = 'yes' ] ; then - echo "SELECT upper(decode(${IGNORE_DB_NAME:-0}, 0, d.NAME, i.instance_name)) - ||'|'|| d.DB_UNIQUE_NAME - ||'|'|| d.DATABASE_ROLE - ||'|'|| d.open_mode - ||'|'|| dh.file# - ||'|'|| round((dh.CHECKPOINT_TIME-to_date('01.01.1970','dd.mm.yyyy'))*24*60*60) - ||'|'|| round((sysdate-dh.CHECKPOINT_TIME)*24*60*60) - ||'|'|| dh.STATUS - ||'|'|| dh.RECOVER - ||'|'|| dh.FUZZY - ||'|'|| dh.CHECKPOINT_CHANGE# - FROM V\$datafile_header dh, v\$database d, v\$instance i - ORDER BY dh.file#; - " - elif [ "$AT_LEAST_ORACLE_92" = 'yes' ] ; then - echo "SELECT upper(decode(${IGNORE_DB_NAME:-0}, 0, d.NAME, i.instance_name)) - ||'|'|| upper(decode(${IGNORE_DB_NAME:-0}, 0, d.NAME, i.instance_name)) - ||'|'|| d.DATABASE_ROLE - ||'|'|| d.open_mode - ||'|'|| dh.file# - ||'|'|| round((dh.CHECKPOINT_TIME-to_date('01.01.1970','dd.mm.yyyy'))*24*60*60) - ||'|'|| round((sysdate-dh.CHECKPOINT_TIME)*24*60*60) - ||'|'|| dh.STATUS - ||'|'|| dh.RECOVER - ||'|'|| dh.FUZZY - ||'|'|| dh.CHECKPOINT_CHANGE# - FROM V\$datafile_header dh, v\$database d, v\$instance i - ORDER BY dh.file#; - " - fi -} - -sql_rman() -{ - if [ "$AT_LEAST_ORACLE_102" = 'yes' ] ; then - echo 'PROMPT <<>>' - echo "select /*"$HINT_RMAN" check_mk rman1 */ upper(name) - || '|'|| 'COMPLETED' - || '|'|| 
to_char(COMPLETION_TIME, 'YYYY-mm-dd_HH24:MI:SS') - || '|'|| to_char(COMPLETION_TIME, 'YYYY-mm-dd_HH24:MI:SS') - || '|'|| case when INCREMENTAL_LEVEL IS NULL - then 'DB_FULL' - else 'DB_INCR' - end - || '|'|| INCREMENTAL_LEVEL - || '|'|| round(((sysdate-COMPLETION_TIME) * 24 * 60), 0) - || '|'|| INCREMENTAL_CHANGE# - from (select upper(decode(${IGNORE_DB_NAME:-0}, 0, vd.NAME, i.instance_name)) name - , bd2.INCREMENTAL_LEVEL, bd2.INCREMENTAL_CHANGE#, min(bd2.COMPLETION_TIME) COMPLETION_TIME - from (select bd.file#, bd.INCREMENTAL_LEVEL, max(bd.COMPLETION_TIME) COMPLETION_TIME - from v\$backup_datafile bd - join v\$datafile_header dh on dh.file# = bd.file# - where dh.status = 'ONLINE' - group by bd.file#, bd.INCREMENTAL_LEVEL - ) bd - join v\$backup_datafile bd2 on bd2.file# = bd.file# - and bd2.COMPLETION_TIME = bd.COMPLETION_TIME - join v\$database vd on vd.RESETLOGS_CHANGE# = bd2.RESETLOGS_CHANGE# - join v\$instance i on 1=1 - group by upper(decode(${IGNORE_DB_NAME:-0}, 0, vd.NAME, i.instance_name)) - , bd2.INCREMENTAL_LEVEL - , bd2.INCREMENTAL_CHANGE# - order by name, bd2.INCREMENTAL_LEVEL); - - select /*"$HINT_RMAN" check_mk rman2 */ name - || '|' || 'COMPLETED' - || '|' - || '|' || to_char(CHECKPOINT_TIME, 'yyyy-mm-dd_hh24:mi:ss') - || '|' || 'CONTROLFILE' - || '|' - || '|' || round((sysdate - CHECKPOINT_TIME) * 24 * 60) - || '|' || '0' - from (select upper(decode(${IGNORE_DB_NAME:-0}, 0, d.NAME, i.instance_name)) name - ,max(bcd.CHECKPOINT_TIME) CHECKPOINT_TIME - from v\$database d - join V\$BACKUP_CONTROLFILE_DETAILS bcd on d.RESETLOGS_CHANGE# = bcd.RESETLOGS_CHANGE# - join v\$instance i on 1=1 - group by upper(decode(${IGNORE_DB_NAME:-0}, 0, d.NAME, i.instance_name)) - ); - - select /*"$HINT_RMAN" check_mk rman3 */ name - || '|COMPLETED' - || '|'|| to_char(sysdate, 'YYYY-mm-dd_HH24:MI:SS') - || '|'|| to_char(completed, 'YYYY-mm-dd_HH24:MI:SS') - || '|ARCHIVELOG||' - || round((sysdate - completed)*24*60,0) - || '|' - from ( - select upper(decode(${IGNORE_DB_NAME:-0}, 0, d.NAME, i.instance_name)) name - , max(a.completion_time) completed - , case when a.backup_count > 0 then 1 else 0 end - from v\$archived_log a, v\$database d, v\$instance i - where a.backup_count > 0 - and a.dest_id in - (select b.dest_id - from v\$archive_dest b - where b.target = 'PRIMARY' - and b.SCHEDULE = 'ACTIVE' - ) - group by d.NAME, i.instance_name - , case when a.backup_count > 0 then 1 else 0 end);" - fi -} - -sql_recovery_area() -{ - if [ "$AT_LEAST_ORACLE_102" = 'yes' ] ; then - echo 'PROMPT <<>>' - echo "select upper(decode(${IGNORE_DB_NAME:-0}, 0, d.NAME, i.instance_name)) - ||' '|| round((SPACE_USED-SPACE_RECLAIMABLE)/ - (CASE NVL(SPACE_LIMIT,1) WHEN 0 THEN 1 ELSE SPACE_LIMIT END)*100) - ||' '|| round(SPACE_LIMIT/1024/1024) - ||' '|| round(SPACE_USED/1024/1024) - ||' '|| round(SPACE_RECLAIMABLE/1024/1024) - ||' '|| d.FLASHBACK_ON - from V\$RECOVERY_FILE_DEST, v\$database d, v\$instance i; - " - fi -} - -sql_undostat() -{ - echo 'PROMPT <<>>' - if [ "$AT_LEAST_ORACLE_102" = 'yes' ] ; then - echo "select upper(i.INSTANCE_NAME) - ||'|'|| ACTIVEBLKS - ||'|'|| MAXCONCURRENCY - ||'|'|| TUNED_UNDORETENTION - ||'|'|| maxquerylen - ||'|'|| NOSPACEERRCNT - from v\$instance i, - (select * from (select * - from v\$undostat order by end_time desc - ) - where rownum = 1 - and TUNED_UNDORETENTION > 0 - ); - " - elif [ "$AT_LEAST_ORACLE_92" = 'yes' ] ; then - # TUNED_UNDORETENTION and ACTIVEBLKS are not availibe in Oracle <=9.2! 
- # we sent a -1 for filtering in check_undostat - echo "select upper(i.INSTANCE_NAME) - ||'|-1' - ||'|'|| MAXCONCURRENCY - ||'|-1' - ||'|'|| maxquerylen - ||'|'|| NOSPACEERRCNT - from v\$instance i, - (select * from (select * - from v\$undostat order by end_time desc - ) - where rownum = 1 - ); - " - fi -} - -sql_resumable() -{ - echo 'PROMPT <<>>' - echo "select upper(i.INSTANCE_NAME) - ||'|'|| u.username - ||'|'|| a.SESSION_ID - ||'|'|| a.status - ||'|'|| a.TIMEOUT - ||'|'|| round((sysdate-to_date(a.SUSPEND_TIME,'mm/dd/yy hh24:mi:ss'))*24*60*60) - ||'|'|| a.ERROR_NUMBER - ||'|'|| to_char(to_date(a.SUSPEND_TIME, 'mm/dd/yy hh24:mi:ss'),'mm/dd/yy_hh24:mi:ss') - ||'|'|| a.RESUME_TIME - ||'|'|| a.ERROR_MSG - from dba_resumable a, v\$instance i, dba_users u - where a.INSTANCE_ID = i.INSTANCE_NUMBER - and u.user_id = a.user_id - and a.SUSPEND_TIME is not null - union all - select upper(i.INSTANCE_NAME) - || '|||||||||' - from v\$instance i -; - " -} - -sql_jobs() -{ - if [ "$AT_LEAST_ORACLE_102" = 'yes' ] ; then - echo 'PROMPT <<>>' - echo "SELECT upper(decode(${IGNORE_DB_NAME:-0}, 0, vd.NAME, i.instance_name)) - ||'|'|| j.OWNER - ||'|'|| j.JOB_NAME - ||'|'|| j.STATE - ||'|'|| ROUND((TRUNC(sysdate) + j.LAST_RUN_DURATION - TRUNC(sysdate)) * 86400) - ||'|'|| j.RUN_COUNT - ||'|'|| j.ENABLED - ||'|'|| NVL(j.NEXT_RUN_DATE, to_date('1970-01-01', 'YYYY-mm-dd')) - ||'|'|| NVL(j.SCHEDULE_NAME, '-') - ||'|'|| jd.STATUS - FROM dba_scheduler_jobs j - join v\$database vd on 1 = 1 - join v\$instance i on 1 = 1 - left outer join (SELECT owner, job_name, max(LOG_ID) log_id - FROM dba_scheduler_job_run_details dd - group by owner, job_name - ) jm on jm.JOB_NAME = j.JOB_NAME - and jm.owner=j.OWNER - left outer join dba_scheduler_job_run_details jd - on jd.owner = jm.OWNER - AND jd.JOB_NAME = jm.JOB_NAME - AND jd.LOG_ID = jm.LOG_ID - WHERE j.auto_drop = 'FALSE';" - fi -} - -sql_ts_quotas() -{ - echo 'PROMPT <<>>' - echo "select upper(decode(${IGNORE_DB_NAME:-0}, 0, d.NAME, i.instance_name)) - ||'|'|| Q.USERNAME - ||'|'|| Q.TABLESPACE_NAME - ||'|'|| Q.BYTES - ||'|'|| Q.MAX_BYTES - from dba_ts_quotas Q, v\$database d, v\$instance i - where max_bytes > 0 - union all - select upper(decode(${IGNORE_DB_NAME:-0}, 0, d.NAME, i.instance_name)) - ||'|||' - from v\$database d, v\$instance i - order by 1; - " -} - -sql_version() -{ - echo 'PROMPT <<>>' - echo "select upper(i.INSTANCE_NAME) - || ' ' || banner - from v\$version, v\$instance i - where banner like 'Oracle%';" -} - -sql_instance() -{ - echo 'prompt <<>>' - if [ ${ORACLE_SID:0:1} = '+' ] ; then - # ASM - echo "select upper(i.instance_name) - || '|' || i.VERSION - || '|' || i.STATUS - || '|' || i.LOGINS - || '|' || i.ARCHIVER - || '|' || round((sysdate - i.startup_time) * 24*60*60) - || '|' || '0' - || '|' || 'NO' - || '|' || 'ASM' - || '|' || 'NO' - || '|' || i.instance_name - from v\$instance i; - " - else - # normal Instance - echo "select upper(i.instance_name) - || '|' || i.VERSION - || '|' || i.STATUS - || '|' || i.LOGINS - || '|' || i.ARCHIVER - || '|' || round((sysdate - i.startup_time) * 24*60*60) - || '|' || DBID - || '|' || LOG_MODE - || '|' || DATABASE_ROLE - || '|' || FORCE_LOGGING - || '|' || d.name - || '|' || to_char(d.created, 'ddmmyyyyhh24mi') - from v\$instance i, v\$database d; - " - fi -} - -sql_sessions() -{ - echo 'prompt <<>>' - echo "select upper(i.instance_name) - || '|' || CURRENT_UTILIZATION - || '|' || ltrim(LIMIT_VALUE) - || '|' || MAX_UTILIZATION - from v\$resource_limit, v\$instance i - where RESOURCE_NAME = 'sessions';" - -} - 
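Each of the sql_* helpers in this file follows the same pattern: a PROMPT line announces an agent section, and the query concatenates its columns with '|' so every result row reaches the monitoring side as a single pipe-separated record. A short Python 3 sketch of splitting such a record; the field names follow the sessions query above, and the sample values are invented:

def parse_sessions_row(row):
    # One record of the sessions section: instance|current|limit|max, as produced
    # by the || '|' || concatenation in sql_sessions() above.
    instance, current, limit, maximum = row.strip().split("|")
    return {
        "instance": instance,
        "current": int(current),
        # LIMIT_VALUE may be 'UNLIMITED' in v$resource_limit, so keep it as text
        "limit": limit,
        "max": int(maximum),
    }

# Invented sample record:
print(parse_sessions_row("MYSID|42|300|118"))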
-sql_processes() -{ - echo 'prompt <<>>' - echo "select upper(i.instance_name) - || ' ' || CURRENT_UTILIZATION - || ' ' || ltrim(rtrim(LIMIT_VALUE)) - from v\$resource_limit, v\$instance i - where RESOURCE_NAME = 'processes'; - " -} - -sql_logswitches() -{ - echo 'prompt <<>>' - echo "select upper(i.instance_name) - || ' ' || logswitches - from v\$instance i , - (select count(1) logswitches - from v\$loghist h , v\$instance i - where h.first_time > sysdate - 1/24 - and h.thread# = i.instance_number - ); - " -} - -sql_locks() -{ - if [ "$AT_LEAST_ORACLE_102" = 'yes' ] ; then - echo 'prompt <<>>' - echo "select upper(i.instance_name) - || '|' || b.sid - || '|' || b.serial# - || '|' || b.machine - || '|' || b.program - || '|' || b.process - || '|' || b.osuser - || '|' || b.username - || '|' || b.SECONDS_IN_WAIT - || '|' || b.BLOCKING_SESSION_STATUS - || '|' || bs.inst_id - || '|' || bs.sid - || '|' || bs.serial# - || '|' || bs.machine - || '|' || bs.program - || '|' || bs.process - || '|' || bs.osuser - || '|' || bs.username - from v\$session b - join v\$instance i on 1=1 - join gv\$session bs on bs.inst_id = b.BLOCKING_INSTANCE - and bs.sid = b.BLOCKING_SESSION - where b.BLOCKING_SESSION is not null -; - select upper(i.instance_name) - || '|||||||||||||||||' - from v\$instance i -; - " - fi -} - -sql_locks_old() -{ - if [ "$AT_LEAST_ORACLE_101" = 'yes' ] ; then - echo 'prompt <<>>' - echo "SET SERVEROUTPUT ON feedback off -DECLARE - type x is table of varchar2(20000) index by pls_integer; - xx x; -begin - begin - execute immediate 'select upper(i.instance_name) - || ''|'' || a.sid - || ''|'' || b.serial# - || ''|'' || b.machine - || ''|'' || b.program - || ''|'' || b.process - || ''|'' || b.osuser - || ''|'' || a.ctime - || ''|'' || decode(c.owner,NULL,''NULL'',c.owner) - || ''|'' || decode(c.object_name,NULL,''NULL'',c.object_name) - from V\$LOCK a, v\$session b, dba_objects c, v\$instance i - where (a.id1, a.id2, a.type) - IN (SELECT id1, id2, type - FROM GV\$LOCK - WHERE request>0 - ) - and request=0 - and a.sid = b.sid - and a.id1 = c.object_id (+) - union all - select upper(i.instance_name) || ''|||||||||'' - from v\$instance i' - bulk collect into xx; - if xx.count >= 1 then - for i in 1 .. 
xx.count loop - dbms_output.put_line(xx(i)); - end loop; - end if; - exception - when others then - for cur1 in (select upper(i.instance_name) instance_name from v\$instance i) loop - dbms_output.put_line(cur1.instance_name || '|||||||||'||sqlerrm); - end loop; - end; -END; -/ -set serverout off -" - fi -} - -sql_longactivesessions() -{ - if [ "$AT_LEAST_ORACLE_101" = 'yes' ] ; then - echo 'prompt <<>>' - echo "select upper(i.instance_name) - || '|' || s.sid - || '|' || s.serial# - || '|' || s.machine - || '|' || s.process - || '|' || s.osuser - || '|' || s.program - || '|' || s.last_call_et - || '|' || s.sql_id - from v\$session s, v\$instance i - where s.status = 'ACTIVE' - and type != 'BACKGROUND' - and s.username is not null - and s.username not in('PUBLIC') - and s.last_call_et > 60*60 - union all - select upper(i.instance_name) - || '||||||||' - from v\$instance i; - " - fi -} - -sql_asm_diskgroup() -{ - echo 'prompt <<>>' - if [ "$AT_LEAST_ORACLE_112" = 'yes' ] ; then - echo "select STATE - || ' ' || TYPE - || ' ' || 'N' - || ' ' || sector_size - || ' ' || block_size - || ' ' || allocation_unit_size - || ' ' || total_mb - || ' ' || free_mb - || ' ' || required_mirror_free_mb - || ' ' || usable_file_mb - || ' ' || offline_disks - || ' ' || voting_files - || ' ' || name || '/' - from v\$asm_diskgroup; - " - elif [ "$AT_LEAST_ORACLE_102" = 'yes' ] ; then - echo "select STATE - || ' ' || TYPE - || ' ' || 'N' - || ' ' || sector_size - || ' ' || block_size - || ' ' || allocation_unit_size - || ' ' || total_mb - || ' ' || free_mb - || ' ' || required_mirror_free_mb - || ' ' || usable_file_mb - || ' ' || offline_disks - || ' ' || 'N' - || ' ' || name || '/' - from v\$asm_diskgroup; - " - fi -} - -#. -# .--set_osenv-----------------------------------------------------------. -# | _ | -# | ___ ___| |_ ___ ___ ___ _ ____ __ | -# | / __|/ _ \ __| / _ \/ __|/ _ \ '_ \ \ / / | -# | \__ \ __/ |_ | (_) \__ \ __/ | | \ V / | -# | |___/\___|\__|___\___/|___/\___|_| |_|\_/ | -# | |_____| | -# +----------------------------------------------------------------------+ -# | Functions for Operating System dependent stuff | -# '----------------------------------------------------------------------' - -function set_osenv () { - ostype=$(uname -s) - AWK=$(which awk) - - if [ ${ostype} = 'Linux' ] ; then - - GREP=$(which grep) - STATCX='stat -c %X' - STATCY='stat -c %Y' - - elif [ ${ostype} = 'SunOS' ] ; then - - # expand the PATH for inetd. Otherwise some stuff in /opt/sfw/bin is not found! - export PATH=$PATH:/usr/ucb:/usr/proc/bin:opt/sfw/bin:/opt/sfw/sbin:/usr/sfw/bin:/usr/sfw/sbin:/opt/csw/bin - - GREP=/usr/xpg4/bin/grep - - if [ ! -x $GREP ] ; then - echo "Please make sure that "$GREP" is existing on Solaris!" - echo "Aborting mk_oracle plugin." - exit 999 - fi - - STATCX='file_mtime' - STATCY='file_mtime' - - AWK=$(which nawk) - - else - ostype="unknown OS: "${ostype} - fi - -} - -#. -# .--oraenv--------------------------------------------------------------. 
-# | | -# | ___ _ __ __ _ ___ _ ____ __ | -# | / _ \| '__/ _` |/ _ \ '_ \ \ / / | -# | | (_) | | | (_| | __/ | | \ V / | -# | \___/|_| \__,_|\___|_| |_|\_/ | -# | | -# +----------------------------------------------------------------------+ -# | Functions for getting the Oracle environment | -# '----------------------------------------------------------------------' - -function set_oraenv () { - local SID=${1} - - if [[ "$SID" =~ ^REMOTE_INSTANCE_.* ]] ; then - - # we get the ORACLE_HOME from mk_oracle.cfg for REMOTE execution - ORACLE_HOME=${ORACLE_HOME:-${REMOTE_ORACLE_HOME}} - - else - # we need to keep an existing ORACLE_SID in remote mode - # => set it only in local mode - ORACLE_SID=$SID - - # we work in local mode - test -f /etc/oratab && ORATAB=/etc/oratab - # /var/opt/oracle/oratab is needed for Oracle Solaris - test -f /var/opt/oracle/oratab && ORATAB=/var/opt/oracle/oratab - test -f ${ORATAB:-""} || echo "ORA-99999 oratab not found" - test -f ${ORATAB:-""} || exit 1 - - ORACLE_HOME=$(cat ${ORATAB} | grep "^"${ORACLE_SID}":" | cut -d":" -f2) - if [ -z $ORACLE_HOME ] ; then - # cut last number from SID for Oracle RAC to find entry in oratab - ORACLE_SID_SHORT=$(echo $ORACLE_SID | sed "s/[0-9]$//") - ORACLE_HOME=$(cat ${ORATAB} | grep "^"${ORACLE_SID_SHORT}":" | cut -d":" -f2) - fi - fi - - LD_LIBRARY_PATH=$ORACLE_HOME/lib - - if [ ! -d ${ORACLE_HOME:-"not_found"} ] ; then - echo "ORA-99999 ORACLE_HOME for ORACLE_SID="$ORACLE_SID" not found or not existing!" - exit 1 - fi - - TNS_ADMIN=${TNS_ADMIN:-$MK_CONFDIR} - - test -f ${TNS_ADMIN}/sqlnet.ora || ( echo "ORA-99998 Couldn't find "${TNS_ADMIN}/sqlnet.ora ; exit 1) - - export ORACLE_HOME TNS_ADMIN ORACLE_SID LD_LIBRARY_PATH -} - -function get_oraversion () { - # oraenv is only needed when version is determined from sqlplus - set_oraenv ${1} - - if [[ ! "$1" =~ ^REMOTE_INSTANCE_.* ]] ; then - # we get the ORACLE_VERSION in main loop because this is only avalible from mk_oracle.cfg - # that file is only read in function sqlplus at later time! - - # get the version from ORACLE_HOME/bin/sqlplus - ORACLE_VERSION=$($ORACLE_HOME/bin/sqlplus -V | grep ^SQL | cut -d" " -f3 | cut -d"." 
-f-2) - fi - - # remove possible existing variables - unset AT_LEAST_ORACLE_180 AT_LEAST_ORACLE_122 AT_LEAST_ORACLE_121 AT_LEAST_ORACLE_112 AT_LEAST_ORACLE_111 AT_LEAST_ORACLE_102 AT_LEAST_ORACLE_101 AT_LEAST_ORACLE_92 - - if [ "$ORACLE_VERSION" = '18.0' ] ; then - AT_LEAST_ORACLE_180=yes - fi - - if [ "$ORACLE_VERSION" = '18.0' -o "$ORACLE_VERSION" = '12.2' ] ; then - AT_LEAST_ORACLE_122=yes - fi - - if [ "$ORACLE_VERSION" = '18.0' -o "$ORACLE_VERSION" = '12.2' -o "$ORACLE_VERSION" = '12.1' ] ; then - AT_LEAST_ORACLE_121=yes - fi - - if [ "$ORACLE_VERSION" = '18.0' -o "$ORACLE_VERSION" = '12.2' -o "$ORACLE_VERSION" = '12.1' -o "$ORACLE_VERSION" = '11.2' ] ; then - AT_LEAST_ORACLE_112=yes - fi - - if [ "$ORACLE_VERSION" = '18.0' -o "$ORACLE_VERSION" = '12.2' -o "$ORACLE_VERSION" = '12.1' -o "$ORACLE_VERSION" = '11.2' \ - -o "$ORACLE_VERSION" = '11.1' ] ; then - AT_LEAST_ORACLE_111=yes - fi - - if [ "$ORACLE_VERSION" = '18.0' -o "$ORACLE_VERSION" = '12.2' -o "$ORACLE_VERSION" = '12.1' -o "$ORACLE_VERSION" = '11.2' \ - -o "$ORACLE_VERSION" = '11.1' -o "$ORACLE_VERSION" = '10.2' ] ; then - AT_LEAST_ORACLE_102=yes - fi - - if [ "$ORACLE_VERSION" = '18.0' -o "$ORACLE_VERSION" = '12.2' -o "$ORACLE_VERSION" = '12.1' -o "$ORACLE_VERSION" = '11.2' \ - -o "$ORACLE_VERSION" = '11.1' -o "$ORACLE_VERSION" = '10.2' \ - -o "$ORACLE_VERSION" = '10.1' ] ; then - AT_LEAST_ORACLE_101=yes - fi - - if [ "$ORACLE_VERSION" = '18.0' -o "$ORACLE_VERSION" = '12.2' -o "$ORACLE_VERSION" = '12.1' -o "$ORACLE_VERSION" = '11.2' \ - -o "$ORACLE_VERSION" = '11.1' -o "$ORACLE_VERSION" = '10.2' \ - -o "$ORACLE_VERSION" = '10.1' -o "$ORACLE_VERSION" = '9.2' ] ; then - AT_LEAST_ORACLE_92=yes - fi -} - -#. -# .--Functions-----------------------------------------------------------. -# | _____ _ _ | -# | | ___| _ _ __ ___| |_(_) ___ _ __ ___ | -# | | |_ | | | | '_ \ / __| __| |/ _ \| '_ \/ __| | -# | | _|| |_| | | | | (__| |_| | (_) | | | \__ \ | -# | |_| \__,_|_| |_|\___|\__|_|\___/|_| |_|___/ | -# | | -# +----------------------------------------------------------------------+ -# | Helper functions | -# '----------------------------------------------------------------------' - - -function ora_session_environment() -{ - echo 'set pages 0 trimspool on feedback off lines 8000' - if [ "$AT_LEAST_ORACLE_102" = 'yes' -a ! "$DISABLE_ORA_SESSION_SETTINGS" ] ; then - echo 'set echo off' - echo 'alter session set "_optimizer_mjc_enabled"=false;' - - # cursor_sharing is not valid for ASM instances - if [ ! ${ORACLE_SID:0:1} = '+' ] ; then - echo 'alter session set cursor_sharing=exact;' - fi - - echo 'set echo on' - fi - echo 'whenever sqlerror exit 1' - echo ' ' -} - -# Helper function that calls an SQL statement with a clean output -# Usage: echo "..." | sqlplus SID -function sqlplus() { - loc_stdin=$(cat) - local SID=$1 - - - # reload mk_oracle.cfg for run_cached. Otherwise some variables are missing - if [ -e "$MK_CONFDIR/mk_oracle.cfg" ] - then - . $MK_CONFDIR/mk_oracle.cfg - fi - - ORADBUSER="" - DBPASSWORD="" - - set_oraenv $SID - - if [[ "$1" =~ ^REMOTE_INSTANCE_.* ]] ; then - - # working on REMOTE_-Mode! - ORACFGLINE=$(eval echo \${$1}) - - ORACLE_SID=$(echo ${ORACFGLINE} | cut -d":" -f$[7]) - - TNSALIAS=$(echo $REMOTE_VARNAME | cut -d"_" -f3-) - - # we need to add the piggyback sections! 
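The connection entries read from mk_oracle.cfg are plain colon-separated strings; the cut -d":" offsets below pull out user, password, an optional SYSDBA/SYSASM role, host and port, and for REMOTE_INSTANCE_* entries also the piggyback hostname (field 6) and the remote ORACLE_SID (field 7). A hedged Python 3 sketch of the same split, using an invented sample entry:

def parse_remote_instance(entry):
    # Field order matching the cut -d":" offsets used in this function:
    # 1 user, 2 password, 3 role (SYSDBA/SYSASM, may be empty), 4 host,
    # 5 port, 6 piggyback hostname, 7 ORACLE_SID.
    keys = ["user", "password", "role", "host", "port", "piggyback_host", "sid"]
    return dict(zip(keys, entry.split(":")))

# Invented sample entry:
print(parse_remote_instance("check_mk:secret:SYSDBA:db1.example.org:1521:db1:ORCL"))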
- remote_hostname=$(echo ${ORACFGLINE} | cut -d":" -f$[6]) - - # build the piggyback information in loc_stdin - # <<<>>> SQL-Statements <<<<>>>> - loc_stdin=$(echo 'prompt <<<<'$remote_hostname'>>>>'; echo ' ')$loc_stdin$(echo ' '; echo 'prompt <<<<>>>>'; echo ' ') - else - # working with locally running instances - # mk_oracle_dbusers.conf is for compatibility. Do not use it anymore - ORACLE_USERCONF=${MK_CONFDIR}/mk_oracle_dbuser.conf - - TNSALIAS=${ORACLE_SID} - - # ASM use '+' as 1st character in SID! - if [ ${ORACLE_SID:0:1} = '+' ] ; then - ORACFGLINE=${ASMUSER} - else - # use an individuel user or the default DBUSER from mk_oracle.cfg - dummy="DBUSER_"${ORACLE_SID} - ORACFGLINE=${!dummy} - if [ "$ORACFGLINE" = '' ] ; then - ORACFGLINE=${DBUSER} - fi - fi - - if [ -f ${ORACLE_USERCONF} -a "${ORACFGLINE}" = '' ] ; then - # mk_oracle_dbuser.conf - ORACFGLINE=$(cat ${ORACLE_USERCONF} | grep "^"${ORACLE_SID}":") - # mk_oracle_dbuser has ORACLE_SID as 1. parameter. we need an offset for all values - offset=1 - else - # mk_oracle.cfg - offset=0 - fi - fi - - # SID_UPPER is required for later use in function sqlplus. - # gathering at this point is needed due to dependency to mk_oracle.cfg in remote mode - SID_UPPER=$(echo $ORACLE_SID | tr '[:lower:]' '[:upper:]') - export SID_UPPER - - offset=${offset:-0} - ORADBUSER=$(echo ${ORACFGLINE} | cut -d":" -f$[1+offset]) - DBPASSWORD=$(echo ${ORACFGLINE} | cut -d":" -f$[2+offset]) - DBSYSCONNECT=$(echo ${ORACFGLINE} | cut -d":" -f$[3+offset]) - DBHOST=$(echo ${ORACFGLINE} | cut -d":" -f$[4+offset]) - DBPORT=$(echo ${ORACFGLINE} | cut -d":" -f$[5+offset]) - - TNSPINGOK=no - if [ -f ${TNS_ADMIN}/tnsnames.ora ] ; then - if "${ORACLE_HOME}"/bin/tnsping "${TNSALIAS}" >/dev/null 2>&1 ; then - TNSPINGOK=yes - else - unset TNSALIAS - fi - else - unset TNSALIAS - fi - - if [ ! "${ORACFGLINE}" ] ; then - # no configuration found - # => use the wallet with tnsnames.ora or EZCONNECT - TNSALIAS=${TNSALIAS:-"localhost:1521/${ORACLE_SID}"} - else - if [ ${DBSYSCONNECT} ] ; then - assysdbaconnect=" as "${DBSYSCONNECT} - fi - - TNSALIAS=${TNSALIAS:-"(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=${DBHOST:-"localhost"})(PORT=${DBPORT:-1521}))(CONNECT_DATA=(SID=${ORACLE_SID})(SERVER=DEDICATED)(UR=A)))"} - - # ORADBUSER = '/'? => ignore DBPASSWORD and use the wallet - if [ "${ORADBUSER}" = '/' ] ; then - # connect with / and wallet - ORADBUSER="" - DBPASSWORD="" - if [ "$TNSPINGOK" = 'no' ] ; then - # create an EZCONNECT string when no tnsnames.ora is usable - # defaults to localhost:1521/ - TNSALIAS="${DBHOST:-"localhost"}:${DBPORT:-1521}/${ORACLE_SID}" - fi - fi - fi - - DBCONNECT="${ORADBUSER}/${DBPASSWORD}@${TNSALIAS}${assysdbaconnect}" - - SQLPLUS=${ORACLE_HOME}/bin/sqlplus - if [ ! -x ${SQLPLUS} ] ; then - echo "sqlplus not found or ORACLE_HOME wrong! " - echo "SQLPLUS="${SQLPLUS} - return 1 - fi - - # add ora_session_environment as prefix before loc_stdin - loc_stdin=$(ora_session_environment)${loc_stdin} - - if OUTPUT=$(echo "$loc_stdin" | "$SQLPLUS" -L -s ${DBCONNECT}) - then - echo "$OUTPUT" - else - - if [ "$DEBUGCONNECT" ] ; then - echo "Logindetails: ${DBCONNECT}" >&2 - echo "$OUTPUT" - else - # SID_UPPER is required for backword compatibilty to old sqlplus.sh. 
- # THis will be removed in a future time - SID_UPPER=${SID_UPPER:-$(echo $SID | tr '[:lower:]' '[:upper:]')} - - # we need to add the piggyback as the sql didn't return any piggyback information in this situation - if [[ "$SID" =~ ^REMOTE_INSTANCE_.* ]] ; then - echo '<<<<'$remote_hostname'>>>>' - fi - - echo '<<>>' - echo "$OUTPUT" | grep -v "^ERROR at line" | tr '\n' ' ' | sed "s/^/$SID_UPPER|FAILURE|/" ; echo - - if [[ "$SID" =~ ^REMOTE_INSTANCE_.* ]] ; then - echo '<<<<>>>>' - fi - fi - fi -} - -function remove_excluded_sections () -{ - local sections="$1" - local excluded="$2" - local result="" - for section in $sections - do - local skip= - for exclude in $excluded - do - if [ "$exclude" = "$section" ] ; then - local skip=yes - break - fi - done - if [ "$skip" != yes ] ; then - result="$result $section" - fi - done - echo "$result" -} - - -# Create one SQL statements for several sections and run -# these with sqlplus. The exitcode is preserved. -function do_sync_checks () -{ - local SID=$1 - local SECTIONS="$2" - for section in $SECTIONS - do - eval "sql_$section" - done | sqlplus $SID -} - -function do_async_checks () -{ - local SID=$1 - echo "$ASYNC_SQL" | sqlplus $SID -} - -# Make sure that the new shell that is being run by run_cached inherits -# our functions -export -f sqlplus -export -f ora_session_environment -export -f do_async_checks -export -f set_oraenv - -function file_mtime() { - /usr/bin/perl -e 'if (! -f $ARGV[0]){die "0000000"};$mtime=(stat($ARGV[0]))[9];print ($mtime);' "$1" -} - -function run_cached () { - local section= - if [ "$1" = -s ] ; then local section="echo '<<<$2>>>' ; " ; shift ; fi - local NAME=$1 - local MAXAGE=$2 - shift 2 - local CMDLINE="$section$@" - - if [ ! -d $MK_VARDIR/cache ]; then mkdir -p $MK_VARDIR/cache ; fi - CACHEFILE="$MK_VARDIR/cache/$NAME.cache" - - # Check if the creation of the cache takes suspiciously long and return - # nothing if the age (access time) of $CACHEFILE.new is twice the MAXAGE - # perl is needed for Solaris => no date +%s availible - local NOW=$(perl -le "print time()") - if [ -e "$CACHEFILE.new" ] ; then - local CF_ATIME=$($STATCX "$CACHEFILE.new") - if [ $((NOW - CF_ATIME)) -ge $((MAXAGE * 2)) ] ; then - # Kill the process still accessing that file in case - # it is still running. This avoids overlapping processes! - fuser -k -9 "$CACHEFILE.new" >/dev/null 2>&1 - rm -f "$CACHEFILE.new" - return - fi - fi - - # Check if cache file exists and is recent enough - if [ -s "$CACHEFILE" ] ; then - local MTIME=$($STATCY "$CACHEFILE") - if [ $((NOW - MTIME)) -le $MAXAGE ] ; then local USE_CACHEFILE=1 ; fi - # Output the file in any case, even if it is - # outdated. The new file will not yet be available - cat "$CACHEFILE" - fi - - # Cache file outdated and new job not yet running? Start it - if [ -z "$USE_CACHEFILE" -a ! 
-e "$CACHEFILE.new" ] ; then - if [ "$DEBUG" ] ; then - echo "set -o noclobber ; exec > \"$CACHEFILE.new\" || exit 1 ; $CMDLINE && mv \"$CACHEFILE.new\" \"$CACHEFILE\" || rm -f \"$CACHEFILE\" \"$CACHEFILE.new\"" | /bin/bash - else - # When the command fails, the output is throws away ignored - echo "set -o noclobber ; exec > \"$CACHEFILE.new\" || exit 1 ; $CMDLINE && mv \"$CACHEFILE.new\" \"$CACHEFILE\" || rm -f \"$CACHEFILE\" \"$CACHEFILE.new\"" | nohup /bin/bash >/dev/null 2>&1 & - fi - fi -} - -function do_testmode() { - echo "-----------------------------------------------" - echo "Operating System: "${ostype} - echo "Logincheck to Instance: "$SID" Version: "$ORACLE_VERSION - echo "select 'Login ok User: ' || user || ' on ' || host_name ||' Instance ' || instance_name - from v\$instance;" | sqlplus $SID - echo "SYNC_SECTIONS=$SECTIONS" - echo "ASYNC_SECTIONS=$ASECTIONS" - if [ "$IGNORE_DB_NAME" ] ; then - echo "IGNORE_DB_NAME found. Ignoring DB_NAME in all SQLs!" - fi - if [ "$DISABLE_ORA_SESSION_SETTINGS" ] ; then - echo "Paramter DISABLE_ORA_SESSION_SETTINGS found!" - fi - if [ "$HINT_RMAN" ] ; then - echo "Using HINT_RMAN for this Instance!" - fi -} - -function do_instance() { - - SID=$1 - - if [ ${ORACLE_SID:0:1} = '+' ] ; then - DO_ASYNC_SECTIONS=${ASYNC_ASM_SECTIONS} - DO_SYNC_SECTIONS=${SYNC_ASM_SECTIONS} - else - # switch sections to ASM - DO_SYNC_SECTIONS=${SYNC_SECTIONS} - DO_ASYNC_SECTIONS=${ASYNC_SECTIONS} - fi - - # Do sync checks - EXCLUDED=$(eval 'echo $EXCLUDE'"_$SID") - SECTIONS=$(remove_excluded_sections "$DO_SYNC_SECTIONS" "$EXCLUDED") - - # Do async checks - ASECTIONS=$(remove_excluded_sections "$DO_ASYNC_SECTIONS" "$EXCLUDED") - ASYNC_SQL=$(for section in $ASECTIONS ; do eval "sql_$section" ; done) - export ASYNC_SQL - - if [ "$DEBUGCONNECT" ] ; then - do_testmode $SID - else - do_sync_checks $SID "$SECTIONS" - run_cached oracle_$SID $CACHE_MAXAGE do_async_checks $SID - fi -} - -#. -# .--Main----------------------------------------------------------------. -# | __ __ _ | -# | | \/ | __ _(_)_ __ | -# | | |\/| |/ _` | | '_ \ | -# | | | | | (_| | | | | | | -# | |_| |_|\__,_|_|_| |_| | -# | | -# +----------------------------------------------------------------------+ -# | Iterate over all instances and execute sync and async sections. | -# '----------------------------------------------------------------------' - -# set some basic operating system stuff -set_osenv - -# Are there any remote configurations? -for element in $(compgen -A variable | ${GREP} -E "^REMOTE_INSTANCE_.*") ; do - - REMOTE_DBS=$REMOTE_DBS" "$element - - remote_hostname=$(echo $element | cut -d":" -f6) - - REMOTE_HOSTLIST=${remote_hostname}" "${REMOTE_HOSTLIST} -done - -if [ "$REMOTE_HOSTLIST" ] ; then - # remove duplicate hosts from list - REMOTE_HOSTLIST=$(echo $REMOTE_HOSTLIST | tr ' ' '\n' | sort | uniq) - - # create empty piggyback SECTIONS - for element in $REMOTE_HOSTLIST ; do - - remote_hostname=$(echo $element | cut -d":" -f6) - echo "<<<<"$remote_hostname">>>>" - - for section in $SYNC_SECTIONS $ASYNC_SECTIONS $SYNC_ASM_SECTIONS $ASYNC_ASM_SECTIONS - do - echo "<<>>" - done - - echo "<<<<>>>>" - done - - if [ ! -e "$MK_VARDIR/mk_oracle.found" ] ; then - touch "$MK_VARDIR/mk_oracle.found" - fi -fi - -# Get list of all running databases -# Do not work on ASM in this plugin. => Ignore a running ASM-Instance! 
-SIDS=$(UNIX95=true ps -ef | ${AWK} '{print $NF}' | ${GREP} -E '^asm_pmon_|^ora_pmon_|^xe_pmon_XE' | cut -d"_" -f3-) - -# If we do not have found any running database instance, then either -# no ORACLE is present on this system or it's just currently not running. -# In the later case we ouput empty agent sections so that Check_MK will be -# happy and execute the actual check functions. -if [ -z "$SIDS" -a ! -e "$MK_VARDIR/mk_oracle.found" ] ; then - exit -fi - -# From now on we expect databases on this system (for ever) -touch $MK_VARDIR/mk_oracle.found - -# Make sure that always all sections are present, even -# in case of an error. Note: the section <<>> -# section shows the general state of a database instance. If -# that section fails for an instance then all other sections -# do not contain valid data anyway. -for section in $SYNC_SECTIONS $ASYNC_SECTIONS $SYNC_ASM_SECTIONS $ASYNC_ASM_SECTIONS -do - echo "<<>>" -done - -for SID in $SIDS -do - - # Check if SID is listed in ONLY_SIDS if this is used - if [ "$ONLY_SIDS" ] ; then - SKIP=yes - for S in $ONLY_SIDS ; do - if [ "$S" = "$SID" ] ; then - SKIP= - break - fi - done - if [ "$SKIP" ] ; then continue ; fi - fi - - EXCLUDE=EXCLUDE_$SID - if [[ "$EXCLUDE" =~ ^[a-zA-Z][a-zA-Z0-9_]*$ ]] - then - # Handle explicit exclusion of instances - # but not for +ASM - EXCLUDE=${!EXCLUDE} - # SID filtered totally? - if [ "$EXCLUDE" = "ALL" ]; then - continue - fi - fi - - get_oraversion $SID - do_instance $SID -done - -for REMOTE_DB in $REMOTE_DBS ; do - REMOTE_DB_LINE=$(eval echo \${$REMOTE_DB}) - - # ORACLE_VERSION comes from mk_oracle.cfg! - ORACLE_VERSION=$(echo $REMOTE_DB_LINE | cut -d":" -f8) - - # the ORACLE_SID is needed for the oracle_instance check for ASM and normal instance - ORACLE_SID=$(echo $REMOTE_DB_LINE | cut -d":" -f7) - - # This is the piggyback hostname - remote_hostname=$(echo $element | cut -d":" -f6) - - get_oraversion $REMOTE_DB - do_instance $REMOTE_DB -done diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_oracle.aix b/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_oracle.aix deleted file mode 100755 index 5c60e7af..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_oracle.aix +++ /dev/null @@ -1,1152 +0,0 @@ -#!/bin/bash -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. 
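# The DBUSER/ASMUSER example a few lines below is only a field skeleton.
# Judging from how this script parses the entry (user:password:SYSDBA|SYSASM:host:port),
# a filled-in sample with made-up credentials could look like this:
#   DBUSER=checkmk:secret::localhost:1521
#   ASMUSER=asmsnmp:secret:SYSASM
# The third field stays empty for a normal connect and is only set to SYSDBA or
# SYSASM where the notes below say it is required (mounted instances, ASM).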
- -# Check_MK agent plugin for monitoring ORACLE databases -# This plugin is a result of the common work of Thorsten Bruhns -# and Mathias Kettner. Thorsten is responsible for the ORACLE -# stuff, Mathias for the shell hacking... - -# Example for mk_oracle.cfg -# DBUSER=:::: -# ASMUSER=:::: -# -# SYSDBA or SYSASM is optional but needed for a mounted instance -# HOSTNAME is optional - Default is localhost -# PORT is optional - Default is 1521 - -while test $# -gt 0 -do - if [ "${1}" = '-d' ] ; then - set -xv ; DEBUG=1 - elif [ "${1}" = '-t' ] ; then - DEBUGCONNECT=1 - fi - shift -done - -if [ ! "$MK_CONFDIR" ] ; then - echo "MK_CONFDIR not set!" >&2 - exit 1 -fi - -if [ ! "$MK_VARDIR" ] ; then - export MK_VARDIR=$MK_CONFDIR -fi - - -# .--Config--------------------------------------------------------------. -# | ____ __ _ | -# | / ___|___ _ __ / _(_) __ _ | -# | | | / _ \| '_ \| |_| |/ _` | | -# | | |__| (_) | | | | _| | (_| | | -# | \____\___/|_| |_|_| |_|\__, | | -# | |___/ | -# +----------------------------------------------------------------------+ -# | The user can override and set variables in mk_oracle.cfg | -# '----------------------------------------------------------------------' - -# Sections that run fast and do no caching -SYNC_SECTIONS="instance sessions logswitches undostat recovery_area processes recovery_status longactivesessions dataguard_stats performance locks" - -# Sections that are run in the background and at a larger interval. -# Note: sections not listed in SYNC_SECTIONS or ASYNC_SECTIONS will not be -# executed at all! -ASYNC_SECTIONS="tablespaces rman jobs ts_quotas resumable" - -# Sections that are run in the background and at a larger interval. -# Note: _ASM_ sections are only executed when SID starts with '+' -# sections listed in SYNC_SECTIONS or ASYNC_SECTIONS are not -# executed for ASM. -SYNC_ASM_SECTIONS="instance" -ASYNC_ASM_SECTIONS="asm_diskgroup" - -# Interval for running async checks (in seconds) -CACHE_MAXAGE=600 - -# You can specify a list of SIDs to monitor. Those databases will -# only be handled, if they are found running, though! -# -# ONLY_SIDS="XE ORCL FOO BAR" -# -# It is possible to filter SIDS negatively. Just add the following to -# the mk_oracle.cfg file: -# -# EXCLUDE_="ALL" -# -# Another option is to filter single checks for SIDS. Just add -# lines as follows to the mk_oracle.cfg file. One service per -# line: -# -# EXCLUDE_="" -# -# For example skip oracle_sessions and oracle_logswitches checks -# for the instance "mysid". -# -# EXCLUDE_mysid="sessions logswitches" -# - -# Source the optional configuration file for this agent plugin -if [ -e "$MK_CONFDIR/mk_oracle.cfg" ] -then - . $MK_CONFDIR/mk_oracle.cfg -fi - -#. -# .--SQL Queries---------------------------------------------------------. -# | ____ ___ _ ___ _ | -# | / ___| / _ \| | / _ \ _ _ ___ _ __(_) ___ ___ | -# | \___ \| | | | | | | | | | | |/ _ \ '__| |/ _ \/ __| | -# | ___) | |_| | |___ | |_| | |_| | __/ | | | __/\__ \ | -# | |____/ \__\_\_____| \__\_\\__,_|\___|_| |_|\___||___/ | -# | | -# +----------------------------------------------------------------------+ -# | The following functions create SQL queries for ORACLE and output | -# | them to stdout. All queries output the database name or the instane | -# | name as first column. 
| -# '----------------------------------------------------------------------' - -sql_performance() -{ - if [ "$AT_LEAST_ORACLE_101" = 'yes' ] ; then - echo 'PROMPT <<>>' - echo "select upper(i.INSTANCE_NAME) - ||'|'|| 'sys_time_model' - ||'|'|| S.STAT_NAME - ||'|'|| Round(s.value/1000000) - from v\$instance i, - v\$sys_time_model s - where s.stat_name in('DB time', 'DB CPU') - order by s.stat_name; - select upper(i.INSTANCE_NAME) - ||'|'|| 'buffer_pool_statistics' - ||'|'|| b.name - ||'|'|| b.db_block_gets - ||'|'|| b.db_block_change - ||'|'|| b.consistent_gets - ||'|'|| b.physical_reads - ||'|'|| b.physical_writes - ||'|'|| b.FREE_BUFFER_WAIT - ||'|'|| b.BUFFER_BUSY_WAIT - from v\$instance i, V\$BUFFER_POOL_STATISTICS b; - select upper(i.INSTANCE_NAME) - ||'|'|| 'librarycache' - ||'|'|| b.namespace - ||'|'|| b.gets - ||'|'|| b.gethits - ||'|'|| b.pins - ||'|'|| b.pinhits - ||'|'|| b.reloads - ||'|'|| b.invalidations - from v\$instance i, V\$librarycache b;" - fi -} - -sql_tablespaces() -{ - echo 'PROMPT <<>>' - if [ "$AT_LEAST_ORACLE_102" = 'yes' ] ; then - - echo "select upper(decode(${IGNORE_DB_NAME:-0}, 0, d.NAME, i.instance_name)) - || '|' || file_name ||'|'|| tablespace_name ||'|'|| fstatus ||'|'|| AUTOEXTENSIBLE - ||'|'|| blocks ||'|'|| maxblocks ||'|'|| USER_BLOCKS ||'|'|| INCREMENT_BY - ||'|'|| ONLINE_STATUS ||'|'|| BLOCK_SIZE - ||'|'|| decode(tstatus,'READ ONLY', 'READONLY', tstatus) || '|' || free_blocks - ||'|'|| contents - from v\$database d , v\$instance i, ( - select f.file_name, f.tablespace_name, f.status fstatus, f.AUTOEXTENSIBLE, - f.blocks, f.maxblocks, f.USER_BLOCKS, f.INCREMENT_BY, - f.ONLINE_STATUS, t.BLOCK_SIZE, t.status tstatus, nvl(sum(fs.blocks),0) free_blocks, t.contents - from dba_data_files f, dba_tablespaces t, dba_free_space fs - where f.tablespace_name = t.tablespace_name - and f.file_id = fs.file_id(+) - group by f.file_name, f.tablespace_name, f.status, f.autoextensible, - f.blocks, f.maxblocks, f.user_blocks, f.increment_by, f.online_status, - t.block_size, t.status, t.contents - UNION - select f.file_name, f.tablespace_name, f.status, f.AUTOEXTENSIBLE, - f.blocks, f.maxblocks, f.USER_BLOCKS, f.INCREMENT_BY, 'TEMP', - t.BLOCK_SIZE, t.status, sum(sh.blocks_free) free_blocks, 'TEMPORARY' - from v\$thread th, dba_temp_files f, dba_tablespaces t, v\$temp_space_header sh - WHERE f.tablespace_name = t.tablespace_name and f.file_id = sh.file_id - GROUP BY th.instance, f.file_name, f.tablespace_name, f.status, - f.autoextensible, f.blocks, f.maxblocks, f.user_blocks, f.increment_by, - 'TEMP', t.block_size, t.status) - where d.database_role = 'PRIMARY'; - " - elif [ "$AT_LEAST_ORACLE_92" = 'yes' ] ; then - - echo "select upper(decode(${IGNORE_DB_NAME:-0}, 0, d.NAME, i.instance_name)) - || '|' || file_name ||'|'|| tablespace_name ||'|'|| fstatus ||'|'|| AUTOEXTENSIBLE - ||'|'|| blocks ||'|'|| maxblocks ||'|'|| USER_BLOCKS ||'|'|| INCREMENT_BY - ||'|'|| ONLINE_STATUS ||'|'|| BLOCK_SIZE - ||'|'|| decode(tstatus,'READ ONLY', 'READONLY', tstatus) || '|' || free_blocks - ||'|'|| contents - from v\$database d , v\$instance i, ( - select f.file_name, f.tablespace_name, f.status fstatus, f.AUTOEXTENSIBLE, - f.blocks, f.maxblocks, f.USER_BLOCKS, f.INCREMENT_BY, - 'ONLINE' ONLINE_STATUS, t.BLOCK_SIZE, t.status tstatus, nvl(sum(fs.blocks),0) free_blocks, t.contents - from dba_data_files f, dba_tablespaces t, dba_free_space fs - where f.tablespace_name = t.tablespace_name - and f.file_id = fs.file_id(+) - group by f.file_name, f.tablespace_name, f.status, f.autoextensible, - 
f.blocks, f.maxblocks, f.user_blocks, f.increment_by, 'ONLINE', - t.block_size, t.status, t.contents - UNION - select f.file_name, f.tablespace_name, 'ONLINE' status, f.AUTOEXTENSIBLE, - f.blocks, f.maxblocks, f.USER_BLOCKS, f.INCREMENT_BY, 'TEMP', - t.BLOCK_SIZE, 'TEMP' status, sum(sh.blocks_free) free_blocks, 'TEMPORARY' - from v\$thread th, dba_temp_files f, dba_tablespaces t, v\$temp_space_header sh - WHERE f.tablespace_name = t.tablespace_name and f.file_id = sh.file_id - GROUP BY th.instance, f.file_name, f.tablespace_name, 'ONLINE', - f.autoextensible, f.blocks, f.maxblocks, f.user_blocks, f.increment_by, - 'TEMP', t.block_size, t.status); - " - fi -} - -sql_dataguard_stats() -{ - if [ "$AT_LEAST_ORACLE_102" = 'yes' ] ; then - echo 'PROMPT <<>>' - echo "SELECT upper(decode(${IGNORE_DB_NAME:-0}, 0, d.NAME, i.instance_name)) - ||'|'|| upper(d.DB_UNIQUE_NAME) - ||'|'|| d.DATABASE_ROLE - ||'|'|| ds.name - ||'|'|| ds.value - FROM v\$database d - JOIN v\$parameter vp on 1=1 - JOIN v\$instance i on 1=1 - left outer join V\$dataguard_stats ds on 1=1 - WHERE vp.name = 'log_archive_config' - AND vp.value is not null - ORDER BY 1; - " - fi -} - -sql_recovery_status() -{ - echo 'PROMPT <<>>' - if [ "$AT_LEAST_ORACLE_101" = 'yes' ] ; then - echo "SELECT upper(decode(${IGNORE_DB_NAME:-0}, 0, d.NAME, i.instance_name)) - ||'|'|| d.DB_UNIQUE_NAME - ||'|'|| d.DATABASE_ROLE - ||'|'|| d.open_mode - ||'|'|| dh.file# - ||'|'|| round((dh.CHECKPOINT_TIME-to_date('01.01.1970','dd.mm.yyyy'))*24*60*60) - ||'|'|| round((sysdate-dh.CHECKPOINT_TIME)*24*60*60) - ||'|'|| dh.STATUS - ||'|'|| dh.RECOVER - ||'|'|| dh.FUZZY - ||'|'|| dh.CHECKPOINT_CHANGE# - FROM V\$datafile_header dh, v\$database d, v\$instance i - ORDER BY dh.file#; - " - elif [ "$AT_LEAST_ORACLE_92" = 'yes' ] ; then - echo "SELECT upper(decode(${IGNORE_DB_NAME:-0}, 0, d.NAME, i.instance_name)) - ||'|'|| upper(decode(${IGNORE_DB_NAME:-0}, 0, d.NAME, i.instance_name)) - ||'|'|| d.DATABASE_ROLE - ||'|'|| d.open_mode - ||'|'|| dh.file# - ||'|'|| round((dh.CHECKPOINT_TIME-to_date('01.01.1970','dd.mm.yyyy'))*24*60*60) - ||'|'|| round((sysdate-dh.CHECKPOINT_TIME)*24*60*60) - ||'|'|| dh.STATUS - ||'|'|| dh.RECOVER - ||'|'|| dh.FUZZY - ||'|'|| dh.CHECKPOINT_CHANGE# - FROM V\$datafile_header dh, v\$database d, v\$instance i - ORDER BY dh.file#; - " - fi -} - -sql_rman() -{ - if [ "$AT_LEAST_ORACLE_102" = 'yes' ] ; then - echo 'PROMPT <<>>' - echo "select /*"$HINT_RMAN" check_mk rman1 */ upper(name) - || '|'|| 'COMPLETED' - || '|'|| to_char(COMPLETION_TIME, 'YYYY-mm-dd_HH24:MI:SS') - || '|'|| to_char(COMPLETION_TIME, 'YYYY-mm-dd_HH24:MI:SS') - || '|'|| case when INCREMENTAL_LEVEL IS NULL - then 'DB_FULL' - else 'DB_INCR' - end - || '|'|| INCREMENTAL_LEVEL - || '|'|| round(((sysdate-COMPLETION_TIME) * 24 * 60), 0) - || '|'|| INCREMENTAL_CHANGE# - from (select upper(decode(${IGNORE_DB_NAME:-0}, 0, vd.NAME, i.instance_name)) name - , bd2.INCREMENTAL_LEVEL, bd2.INCREMENTAL_CHANGE#, min(bd2.COMPLETION_TIME) COMPLETION_TIME - from (select bd.file#, bd.INCREMENTAL_LEVEL, max(bd.COMPLETION_TIME) COMPLETION_TIME - from v\$backup_datafile bd - join v\$datafile_header dh on dh.file# = bd.file# - where dh.status = 'ONLINE' - group by bd.file#, bd.INCREMENTAL_LEVEL - ) bd - join v\$backup_datafile bd2 on bd2.file# = bd.file# - and bd2.COMPLETION_TIME = bd.COMPLETION_TIME - join v\$database vd on vd.RESETLOGS_CHANGE# = bd2.RESETLOGS_CHANGE# - join v\$instance i on 1=1 - group by upper(decode(${IGNORE_DB_NAME:-0}, 0, vd.NAME, i.instance_name)) - , bd2.INCREMENTAL_LEVEL - , 
bd2.INCREMENTAL_CHANGE# - order by name, bd2.INCREMENTAL_LEVEL); - - select /*"$HINT_RMAN" check_mk rman2 */ name - || '|' || 'COMPLETED' - || '|' - || '|' || to_char(CHECKPOINT_TIME, 'yyyy-mm-dd_hh24:mi:ss') - || '|' || 'CONTROLFILE' - || '|' - || '|' || round((sysdate - CHECKPOINT_TIME) * 24 * 60) - || '|' || '0' - from (select upper(decode(${IGNORE_DB_NAME:-0}, 0, d.NAME, i.instance_name)) name - ,max(bcd.CHECKPOINT_TIME) CHECKPOINT_TIME - from v\$database d - join V\$BACKUP_CONTROLFILE_DETAILS bcd on d.RESETLOGS_CHANGE# = bcd.RESETLOGS_CHANGE# - join v\$instance i on 1=1 - group by upper(decode(${IGNORE_DB_NAME:-0}, 0, d.NAME, i.instance_name)) - ); - - select /*"$HINT_RMAN" check_mk rman3 */ name - || '|COMPLETED' - || '|'|| to_char(sysdate, 'YYYY-mm-dd_HH24:MI:SS') - || '|'|| to_char(completed, 'YYYY-mm-dd_HH24:MI:SS') - || '|ARCHIVELOG||' - || round((sysdate - completed)*24*60,0) - || '|' - from ( - select upper(decode(${IGNORE_DB_NAME:-0}, 0, d.NAME, i.instance_name)) name - , max(a.completion_time) completed - , case when a.backup_count > 0 then 1 else 0 end - from v\$archived_log a, v\$database d, v\$instance i - where a.backup_count > 0 - and a.dest_id in - (select b.dest_id - from v\$archive_dest b - where b.target = 'PRIMARY' - and b.SCHEDULE = 'ACTIVE' - ) - group by d.NAME, i.instance_name - , case when a.backup_count > 0 then 1 else 0 end);" - fi -} - -sql_recovery_area() -{ - if [ "$AT_LEAST_ORACLE_102" = 'yes' ] ; then - echo 'PROMPT <<>>' - echo "select upper(decode(${IGNORE_DB_NAME:-0}, 0, d.NAME, i.instance_name)) - ||' '|| round((SPACE_USED-SPACE_RECLAIMABLE)/ - (CASE NVL(SPACE_LIMIT,1) WHEN 0 THEN 1 ELSE SPACE_LIMIT END)*100) - ||' '|| round(SPACE_LIMIT/1024/1024) - ||' '|| round(SPACE_USED/1024/1024) - ||' '|| round(SPACE_RECLAIMABLE/1024/1024) - from V\$RECOVERY_FILE_DEST, v\$database d, v\$instance i; - " - fi -} - -sql_undostat() -{ - echo 'PROMPT <<>>' - if [ "$AT_LEAST_ORACLE_102" = 'yes' ] ; then - echo "select upper(i.INSTANCE_NAME) - ||'|'|| ACTIVEBLKS - ||'|'|| MAXCONCURRENCY - ||'|'|| TUNED_UNDORETENTION - ||'|'|| maxquerylen - ||'|'|| NOSPACEERRCNT - from v\$instance i, - (select * from (select * - from v\$undostat order by end_time desc - ) - where rownum = 1 - and TUNED_UNDORETENTION > 0 - ); - " - elif [ "$AT_LEAST_ORACLE_92" = 'yes' ] ; then - # TUNED_UNDORETENTION and ACTIVEBLKS are not availibe in Oracle <=9.2! 
- # we sent a -1 for filtering in check_undostat - echo "select upper(i.INSTANCE_NAME) - ||'|-1' - ||'|'|| MAXCONCURRENCY - ||'|-1' - ||'|'|| maxquerylen - ||'|'|| NOSPACEERRCNT - from v\$instance i, - (select * from (select * - from v\$undostat order by end_time desc - ) - where rownum = 1 - ); - " - fi -} - -sql_resumable() -{ - echo 'PROMPT <<>>' - echo "select upper(i.INSTANCE_NAME) - ||'|'|| u.username - ||'|'|| a.SESSION_ID - ||'|'|| a.status - ||'|'|| a.TIMEOUT - ||'|'|| round((sysdate-to_date(a.SUSPEND_TIME,'mm/dd/yy hh24:mi:ss'))*24*60*60) - ||'|'|| a.ERROR_NUMBER - ||'|'|| to_char(to_date(a.SUSPEND_TIME, 'mm/dd/yy hh24:mi:ss'),'mm/dd/yy_hh24:mi:ss') - ||'|'|| a.RESUME_TIME - ||'|'|| a.ERROR_MSG - from dba_resumable a, v\$instance i, dba_users u - where a.INSTANCE_ID = i.INSTANCE_NUMBER - and u.user_id = a.user_id - and a.SUSPEND_TIME is not null - union all - select upper(i.INSTANCE_NAME) - || '|||||||||' - from v\$instance i -; - " -} - -sql_jobs() -{ - if [ "$AT_LEAST_ORACLE_102" = 'yes' ] ; then - echo 'PROMPT <<>>' - echo "SELECT upper(decode(${IGNORE_DB_NAME:-0}, 0, vd.NAME, i.instance_name)) - ||'|'|| j.OWNER - ||'|'|| j.JOB_NAME - ||'|'|| j.STATE - ||'|'|| ROUND((TRUNC(sysdate) + j.LAST_RUN_DURATION - TRUNC(sysdate)) * 86400) - ||'|'|| j.RUN_COUNT - ||'|'|| j.ENABLED - ||'|'|| NVL(j.NEXT_RUN_DATE, to_date('1970-01-01', 'YYYY-mm-dd')) - ||'|'|| NVL(j.SCHEDULE_NAME, '-') - ||'|'|| jd.STATUS - FROM dba_scheduler_jobs j - join v\$database vd on 1 = 1 - join v\$instance i on 1 = 1 - left outer join (SELECT owner, job_name, max(LOG_ID) log_id - FROM dba_scheduler_job_run_details dd - group by owner, job_name - ) jm on jm.JOB_NAME = j.JOB_NAME - and jm.owner=j.OWNER - left outer join dba_scheduler_job_run_details jd - on jd.owner = jm.OWNER - AND jd.JOB_NAME = jm.JOB_NAME - AND jd.LOG_ID = jm.LOG_ID; - " - fi -} - -sql_ts_quotas() -{ - echo 'PROMPT <<>>' - echo "select upper(decode(${IGNORE_DB_NAME:-0}, 0, d.NAME, i.instance_name)) - ||'|'|| Q.USERNAME - ||'|'|| Q.TABLESPACE_NAME - ||'|'|| Q.BYTES - ||'|'|| Q.MAX_BYTES - from dba_ts_quotas Q, v\$database d, v\$instance i - where max_bytes > 0 - union all - select upper(decode(${IGNORE_DB_NAME:-0}, 0, d.NAME, i.instance_name)) - ||'|||' - from v\$database d, v\$instance i - order by 1; - " -} - -sql_version() -{ - echo 'PROMPT <<>>' - echo "select upper(i.INSTANCE_NAME) - || ' ' || banner - from v\$version, v\$instance i - where banner like 'Oracle%';" -} - -sql_instance() -{ - echo 'prompt <<>>' - if [ ${ORACLE_SID:0:1} = '+' ] ; then - # ASM - echo "select upper(i.instance_name) - || '|' || i.VERSION - || '|' || i.STATUS - || '|' || i.LOGINS - || '|' || i.ARCHIVER - || '|' || round((sysdate - i.startup_time) * 24*60*60) - || '|' || '0' - || '|' || 'NO' - || '|' || 'ASM' - || '|' || 'NO' - || '|' || i.instance_name - from v\$instance i; - " - else - # normal Instance - echo "select upper(i.instance_name) - || '|' || i.VERSION - || '|' || i.STATUS - || '|' || i.LOGINS - || '|' || i.ARCHIVER - || '|' || round((sysdate - i.startup_time) * 24*60*60) - || '|' || DBID - || '|' || LOG_MODE - || '|' || DATABASE_ROLE - || '|' || FORCE_LOGGING - || '|' || d.name - from v\$instance i, v\$database d; - " - fi -} - -sql_sessions() -{ - echo 'prompt <<>>' - echo "select upper(i.instance_name) - || ' ' || CURRENT_UTILIZATION - from v\$resource_limit, v\$instance i - where RESOURCE_NAME = 'sessions'; - " -} - -sql_processes() -{ - echo 'prompt <<>>' - echo "select upper(i.instance_name) - || ' ' || CURRENT_UTILIZATION - || ' ' || 
ltrim(rtrim(LIMIT_VALUE)) - from v\$resource_limit, v\$instance i - where RESOURCE_NAME = 'processes'; - " -} - -sql_logswitches() -{ - echo 'prompt <<>>' - echo "select upper(i.instance_name) - || ' ' || logswitches - from v\$instance i , - (select count(1) logswitches - from v\$loghist h , v\$instance i - where h.first_time > sysdate - 1/24 - and h.thread# = i.instance_number - ); - " -} - -sql_locks() -{ - if [ "$AT_LEAST_ORACLE_102" = 'yes' ] ; then - echo 'prompt <<>>' - echo "select upper(i.instance_name) - || '|' || b.sid - || '|' || b.serial# - || '|' || b.machine - || '|' || b.program - || '|' || b.process - || '|' || b.osuser - || '|' || b.username - || '|' || b.SECONDS_IN_WAIT - || '|' || b.BLOCKING_SESSION_STATUS - || '|' || bs.inst_id - || '|' || bs.sid - || '|' || bs.serial# - || '|' || bs.machine - || '|' || bs.program - || '|' || bs.process - || '|' || bs.osuser - || '|' || bs.username - from v\$session b - join v\$instance i on 1=1 - join gv\$session bs on bs.inst_id = b.BLOCKING_INSTANCE - and bs.sid = b.BLOCKING_SESSION - where b.BLOCKING_SESSION is not null -; - select upper(i.instance_name) - || '|||||||||||||||||' - from v\$instance i -; - " - fi -} - -sql_locks_old() -{ - if [ "$AT_LEAST_ORACLE_101" = 'yes' ] ; then - echo 'prompt <<>>' - echo "SET SERVEROUTPUT ON feedback off -DECLARE - type x is table of varchar2(20000) index by pls_integer; - xx x; -begin - begin - execute immediate 'select upper(i.instance_name) - || ''|'' || a.sid - || ''|'' || b.serial# - || ''|'' || b.machine - || ''|'' || b.program - || ''|'' || b.process - || ''|'' || b.osuser - || ''|'' || a.ctime - || ''|'' || decode(c.owner,NULL,''NULL'',c.owner) - || ''|'' || decode(c.object_name,NULL,''NULL'',c.object_name) - from V\$LOCK a, v\$session b, dba_objects c, v\$instance i - where (a.id1, a.id2, a.type) - IN (SELECT id1, id2, type - FROM GV\$LOCK - WHERE request>0 - ) - and request=0 - and a.sid = b.sid - and a.id1 = c.object_id (+) - union all - select upper(i.instance_name) || ''|||||||||'' - from v\$instance i' - bulk collect into xx; - if xx.count >= 1 then - for i in 1 .. 
xx.count loop - dbms_output.put_line(xx(i)); - end loop; - end if; - exception - when others then - for cur1 in (select upper(i.instance_name) instance_name from v\$instance i) loop - dbms_output.put_line(cur1.instance_name || '|||||||||'||sqlerrm); - end loop; - end; -END; -/ -set serverout off -" - fi -} - -sql_longactivesessions() -{ - if [ "$AT_LEAST_ORACLE_101" = 'yes' ] ; then - echo 'prompt <<>>' - echo "select upper(i.instance_name) - || '|' || s.sid - || '|' || s.serial# - || '|' || s.machine - || '|' || s.process - || '|' || s.osuser - || '|' || s.program - || '|' || s.last_call_et - || '|' || s.sql_id - from v\$session s, v\$instance i - where s.status = 'ACTIVE' - and type != 'BACKGROUND' - and s.username is not null - and s.username not in('PUBLIC') - and s.last_call_et > 60*60 - union all - select upper(i.instance_name) - || '||||||||' - from v\$instance i; - " - fi -} - -sql_asm_diskgroup() -{ - echo 'prompt <<>>' - if [ "$AT_LEAST_ORACLE_112" = 'yes' ] ; then - echo "select STATE - || ' ' || TYPE - || ' ' || 'N' - || ' ' || sector_size - || ' ' || block_size - || ' ' || allocation_unit_size - || ' ' || total_mb - || ' ' || free_mb - || ' ' || required_mirror_free_mb - || ' ' || usable_file_mb - || ' ' || offline_disks - || ' ' || voting_files - || ' ' || name || '/' - from v\$asm_diskgroup; - " - elif [ "$AT_LEAST_ORACLE_102" = 'yes' ] ; then - echo "select STATE - || ' ' || TYPE - || ' ' || 'N' - || ' ' || sector_size - || ' ' || block_size - || ' ' || allocation_unit_size - || ' ' || total_mb - || ' ' || free_mb - || ' ' || required_mirror_free_mb - || ' ' || usable_file_mb - || ' ' || offline_disks - || ' ' || 'N' - || ' ' || name || '/' - from v\$asm_diskgroup; - " - fi -} - -#. -# .--oraenv--------------------------------------------------------------. -# | | -# | ___ _ __ __ _ ___ _ ____ __ | -# | / _ \| '__/ _` |/ _ \ '_ \ \ / / | -# | | (_) | | | (_| | __/ | | \ V / | -# | \___/|_| \__,_|\___|_| |_|\_/ | -# | | -# +----------------------------------------------------------------------+ -# | Functions for getting the Oracle environment | -# '----------------------------------------------------------------------' - -function set_oraenv () { - ORACLE_SID=${1} - - test -f /etc/oratab && ORATAB=/etc/oratab - # /var/opt/oracle/oratab is needed for Oracle Solaris - test -f /var/opt/oracle/oratab && ORATAB=/var/opt/oracle/oratab - test -f ${ORATAB:-""} || echo "ORA-99999 oratab not found" - test -f ${ORATAB:-""} || exit 1 - - ORACLE_HOME=$(cat ${ORATAB} | grep "^"${ORACLE_SID}":" | cut -d":" -f2) - if [ -z $ORACLE_HOME ] ; then - # cut last number from SID for Oracle RAC to find entry in oratab - ORACLE_SID_SHORT=$(echo $ORACLE_SID | sed "s/[0-9]$//") - ORACLE_HOME=$(cat ${ORATAB} | grep "^"${ORACLE_SID_SHORT}":" | cut -d":" -f2) - fi - - LD_LIBRARY_PATH=$ORACLE_HOME/lib - - if [ ! -d ${ORACLE_HOME:-"not_found"} ] ; then - echo "ORA-99999 ORACLE_HOME for ORACLE_SID="$ORACLE_SID" not found or not existing!" - exit 1 - fi - - TNS_ADMIN=${TNS_ADMIN:-$MK_CONFDIR} - - test -f ${TNS_ADMIN}/sqlnet.ora || ( echo "ORA-99998 Couldn't find "${TNS_ADMIN}/sqlnet.ora ; exit 1) - - export ORACLE_HOME TNS_ADMIN ORACLE_SID LD_LIBRARY_PATH -} - -function get_oraversion () { - set_oraenv ${1} - ORACLE_VERSION=$($ORACLE_HOME/bin/sqlplus -V | grep ^SQL | cut -d" " -f3 | cut -d"." 
-f-2) - - # remove possible existing variables - unset AT_LEAST_ORACLE_121 AT_LEAST_ORACLE_112 AT_LEAST_ORACLE_111 AT_LEAST_ORACLE_102 AT_LEAST_ORACLE_101 AT_LEAST_ORACLE_92 - - if [ "$ORACLE_VERSION" = '12.2' ] ; then - AT_LEAST_ORACLE_122=yes - fi - - if [ "$ORACLE_VERSION" = '12.2' -o "$ORACLE_VERSION" = '12.1' ] ; then - AT_LEAST_ORACLE_121=yes - fi - - if [ "$ORACLE_VERSION" = '12.2' -o "$ORACLE_VERSION" = '12.1' -o "$ORACLE_VERSION" = '11.2' ] ; then - AT_LEAST_ORACLE_112=yes - fi - - if [ "$ORACLE_VERSION" = '12.2' -o "$ORACLE_VERSION" = '12.1' -o "$ORACLE_VERSION" = '11.2' \ - -o "$ORACLE_VERSION" = '11.1' ] ; then - AT_LEAST_ORACLE_111=yes - fi - - if [ "$ORACLE_VERSION" = '12.2' -o "$ORACLE_VERSION" = '12.1' -o "$ORACLE_VERSION" = '11.2' \ - -o "$ORACLE_VERSION" = '11.1' -o "$ORACLE_VERSION" = '10.2' ] ; then - AT_LEAST_ORACLE_102=yes - fi - - if [ "$ORACLE_VERSION" = '12.2' -o "$ORACLE_VERSION" = '12.1' -o "$ORACLE_VERSION" = '11.2' \ - -o "$ORACLE_VERSION" = '11.1' -o "$ORACLE_VERSION" = '10.2' \ - -o "$ORACLE_VERSION" = '10.1' ] ; then - AT_LEAST_ORACLE_101=yes - fi - - if [ "$ORACLE_VERSION" = '12.2' -o "$ORACLE_VERSION" = '12.1' -o "$ORACLE_VERSION" = '11.2' \ - -o "$ORACLE_VERSION" = '11.1' -o "$ORACLE_VERSION" = '10.2' \ - -o "$ORACLE_VERSION" = '10.1' -o "$ORACLE_VERSION" = '9.2' ] ; then - AT_LEAST_ORACLE_92=yes - fi -} - -#. -# .--Functions-----------------------------------------------------------. -# | _____ _ _ | -# | | ___| _ _ __ ___| |_(_) ___ _ __ ___ | -# | | |_ | | | | '_ \ / __| __| |/ _ \| '_ \/ __| | -# | | _|| |_| | | | | (__| |_| | (_) | | | \__ \ | -# | |_| \__,_|_| |_|\___|\__|_|\___/|_| |_|___/ | -# | | -# +----------------------------------------------------------------------+ -# | Helper functions | -# '----------------------------------------------------------------------' - -function sqlplus_internal() { - loc_stdin=$(cat) - set_oraenv $SID - - # reload mk_oracle.cfg for run_cached. Otherwise some variables are missing - if [ -e "$MK_CONFDIR/mk_oracle.cfg" ] - then - . $MK_CONFDIR/mk_oracle.cfg - fi - - # mk_oracle_dbusers.conf is for compatibility. Do not use it anymore - ORACLE_USERCONF=${MK_CONFDIR}/mk_oracle_dbuser.conf - - TNSPINGOK=no - if [ -f ${TNS_ADMIN}/tnsnames.ora ] ; then - if "${ORACLE_HOME}"/bin/tnsping "${ORACLE_SID}" >/dev/null 2>&1 ; then - TNSALIAS=$ORACLE_SID - TNSPINGOK=yes - fi - fi - - ORADBUSER="" - DBPASSWORD="" - - # ASM use '+' as 1st character in SID! - if [ ${ORACLE_SID:0:1} = '+' ] ; then - ORACFGLINE=${ASMUSER} - else - # use an individuel user or the default DBUSER from mk_oracle.cfg - dummy="DBUSER_"${ORACLE_SID} - ORACFGLINE=${!dummy} - if [ "$ORACFGLINE" = '' ] ; then - ORACFGLINE=${DBUSER} - fi - fi - - if [ -f ${ORACLE_USERCONF} -a "${ORACFGLINE}" = '' ] ; then - # mk_oracle_dbuser.conf - ORACFGLINE=$(cat ${ORACLE_USERCONF} | grep "^"${ORACLE_SID}":") - # mk_oracle_dbuser has ORACLE_SID as 1. parameter. we need an offset for all values - offset=1 - else - # mk_oracle.cfg - offset=0 - fi - - ORADBUSER=$(echo ${ORACFGLINE} | cut -d":" -f$[1+offset]) - DBPASSWORD=$(echo ${ORACFGLINE} | cut -d":" -f$[2+offset]) - DBSYSCONNECT=$(echo ${ORACFGLINE} | cut -d":" -f$[3+offset]) - DBHOST=$(echo ${ORACFGLINE} | cut -d":" -f$[4+offset]) - DBPORT=$(echo ${ORACFGLINE} | cut -d":" -f$[5+offset]) - - if [ ! 
"${ORACFGLINE}" ] ; then - # no configuration found - # => use the wallet with tnsnames.ora or EZCONNECT - TNSALIAS=${TNSALIAS:-"localhost:1521/${ORACLE_SID}"} - else - if [ ${DBSYSCONNECT} ] ; then - assysdbaconnect=" as "${DBSYSCONNECT} - fi - TNSALIAS=${TNSALIAS:-"(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=${DBHOST:-"localhost"})(PORT=${DBPORT:-1521}))(CONNECT_DATA=(SID=${ORACLE_SID})(SERVER=DEDICATED)(UR=A)))"} - - # ORADBUSER = '/'? => ignore DBPASSWORD and use the wallet - if [ "${ORADBUSER}" = '/' ] ; then - # connect with / and wallet - ORADBUSER="" - DBPASSWORD="" - if [ "$TNSPINGOK" = 'no' ] ; then - # create an EZCONNECT string when no tnsnames.ora is usable - # defaults to localhost:1521/ - TNSALIAS="${DBHOST:-"localhost"}:${DBPORT:-1521}/${ORACLE_SID}" - fi - fi - fi - - DBCONNECT="${ORADBUSER}/${DBPASSWORD}@${TNSALIAS}${assysdbaconnect}" - - SQLPLUS=${ORACLE_HOME}/bin/sqlplus - if [ ! -x ${SQLPLUS} ] ; then - echo "sqlplus not found or ORACLE_HOME wrong! " - echo "SQLPLUS="${SQLPLUS} - return 1 - fi - - echo "$loc_stdin" | ${SQLPLUS} -L -s ${DBCONNECT} - if [ $? -ne 0 ] ; then - if [ "$DEBUGCONNECT" ] ; then - echo "Logindetails: ${DBCONNECT}" >&2 - fi - return 1 - fi -} - -function ora_session_environment() -{ - echo 'set pages 0 trimspool on feedback off lines 8000' - if [ "$AT_LEAST_ORACLE_102" = 'yes' -a ! "$DISABLE_ORA_SESSION_SETTINGS" ] ; then - echo 'set echo off' - echo 'alter session set "_optimizer_mjc_enabled"=false;' - - # cursor_sharing is not valid for ASM instances - if [ ! ${ORACLE_SID:0:1} = '+' ] ; then - echo 'alter session set cursor_sharing=exact;' - fi - - echo 'set echo on' - fi - echo 'whenever sqlerror exit 1' - echo ' ' -} - -# Helper function that calls an SQL statement with a clean output -# Usage: echo "..." | sqlplus SID -function sqlplus () -{ - local SID=$1 - loc_stdin=$(cat) - loc_stdin=$(ora_session_environment)${loc_stdin} - - # use sqlplus_internal when no sqlplus.sh is found - SQLPLUS="$MK_CONFDIR"/sqlplus.sh - test -f "$SQLPLUS" || SQLPLUS=sqlplus_internal - - if OUTPUT=$(echo "$loc_stdin" | "$SQLPLUS" $SID) - then - echo "$OUTPUT" - else - echo '<<>>' - local SID_UPPER=$(echo "$SID" | tr '[:lower:]' '[:upper:]') - echo "$OUTPUT" | grep -v "^ERROR at line" | tr '\n' ' ' | sed "s/^/$SID_UPPER|FAILURE|/" ; echo - return 1 - fi -} - -function remove_excluded_sections () -{ - local sections="$1" - local excluded="$2" - local result="" - for section in $sections - do - local skip= - for exclude in $excluded - do - if [ "$exclude" = "$section" ] ; then - local skip=yes - break - fi - done - if [ "$skip" != yes ] ; then - result="$result $section" - fi - done - echo "$result" -} - - -# Create one SQL statements for several sections and run -# these with sqlplus. The exitcode is preserved. -function do_sync_checks () -{ - local SID=$1 - local SECTIONS="$2" - for section in $SECTIONS - do - eval "sql_$section" - done | sqlplus $SID -} - -function do_async_checks () -{ - local SID=$1 - echo "$ASYNC_SQL" | sqlplus $SID -} - -# Make sure that the new shell that is being run by run_cached inherits -# our functions -export -f sqlplus -export -f ora_session_environment -export -f sqlplus_internal -export -f do_async_checks -export -f set_oraenv - -function file_mtime() { - /usr/bin/perl -e 'if (! 
-f $ARGV[0]){die "0000000"};$mtime=(stat($ARGV[0]))[9];print ($mtime);' "$1" -} - -function run_cached_local () { - local section= - if [ "$1" = -s ] ; then local section="echo '<<<$2>>>' ; " ; shift ; fi - local NAME=$1 - local MAXAGE=$2 - shift 2 - local CMDLINE="$section$@" - - if [ ! -d $MK_VARDIR/cache ]; then mkdir -p $MK_VARDIR/cache ; fi - CACHEFILE="$MK_VARDIR/cache/$NAME.cache" - - # Check if the creation of the cache takes suspiciously long and return - # nothing if the age (access time) of $CACHEFILE.new is twice the MAXAGE - local NOW=$(date +%s) - if [ -e "$CACHEFILE.new" ] ; then - CF_ATIME=$(file_mtime "$CACHEFILE.new") - if [ $((NOW - CF_ATIME)) -ge $((MAXAGE * 2)) ] ; then - # Kill the process still accessing that file in case - # it is still running. This avoids overlapping processes! - fuser -k -9 "$CACHEFILE.new" >/dev/null 2>&1 - rm -f "$CACHEFILE.new" - return - fi - fi - - # Check if cache file exists and is recent enough - if [ -s "$CACHEFILE" ] ; then - MTIME=$(file_mtime "$CACHEFILE") - if [ $((NOW - MTIME)) -le $MAXAGE ] ; then local USE_CACHEFILE=1 ; fi - # Output the file in any case, even if it is - # outdated. The new file will not yet be available - sed "/^<<>>$/:cached($MTIME,$MAXAGE)>>>/" "$CACHEFILE" - fi - - # Cache file outdated and new job not yet running? Start it - if [ -z "$USE_CACHEFILE" -a ! -e "$CACHEFILE.new" ] ; then - if [ "$DEBUG" ] ; then - echo "set -o noclobber ; exec > \"$CACHEFILE.new\" || exit 1 ; $CMDLINE && mv \"$CACHEFILE.new\" \"$CACHEFILE\" || rm -f \"$CACHEFILE\" \"$CACHEFILE.new\"" | /bin/bash - else - # When the command fails, the output is throws away ignored - echo "set -o noclobber ; exec > \"$CACHEFILE.new\" || exit 1 ; $CMDLINE && mv \"$CACHEFILE.new\" \"$CACHEFILE\" || rm -f \"$CACHEFILE\" \"$CACHEFILE.new\"" | nohup /bin/bash >/dev/null 2>&1 & - fi - fi -} - -#. -# .--Main----------------------------------------------------------------. -# | __ __ _ | -# | | \/ | __ _(_)_ __ | -# | | |\/| |/ _` | | '_ \ | -# | | | | | (_| | | | | | | -# | |_| |_|\__,_|_|_| |_| | -# | | -# +----------------------------------------------------------------------+ -# | Iterate over all instances and execute sync and async sections. | -# '----------------------------------------------------------------------' - -# Get list of all running databases -# Do not work on ASM in this plugin. => Ignore a running ASM-Instance! -SIDS=$(UNIX95=true ps -ef | awk '{print $NF}' | grep -E '^asm_pmon_|^ora_pmon_|^xe_pmon_XE' | cut -d"_" -f3-) - -# If we do not have found any running database instance, then either -# no ORACLE is present on this system or it's just currently not running. -# In the later case we ouput empty agent sections so that Check_MK will be -# happy and execute the actual check functions. -if [ -z "$SIDS" -a ! -e "$MK_VARDIR/mk_oracle.found" ] ; then - exit -fi - -# From now on we expect databases on this system (for ever) -touch $MK_VARDIR/mk_oracle.found - -# Make sure that always all sections are present, even -# in case of an error. Note: the section <<>> -# section shows the general state of a database instance. If -# that section fails for an instance then all other sections -# do not contain valid data anyway. 
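# Emitting the headers up front keeps the corresponding Check_MK services in
# place while an instance is down (they simply carry no data) instead of letting
# them disappear from monitoring.
# Caching recap for the async part, which the main loop hands to run_cached_local
# (numbers follow CACHE_MAXAGE=600 set in the config block above):
#   - an existing cache file $MK_VARDIR/cache/oracle_$SID.cache is always printed,
#     even when it is already stale;
#   - if it is older than 600 s and no ".new" file exists, a background job rebuilds
#     the ".new" file and renames it over the cache on success;
#   - a ".new" file older than 2 x 600 s is treated as a hung job: fuser -k kills
#     the writer and the file is removed.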
-for section in $SYNC_SECTIONS $ASYNC_SECTIONS $SYNC_ASM_SECTIONS $ASYNC_ASM_SECTIONS -do - echo "<<>>" -done - -for SID in $SIDS -do - # We need the SID in uppercase at later time - SID_UPPER=$(echo $SID | tr '[:lower:]' '[:upper:]') - - # Check if SID is listed in ONLY_SIDS if this is used - if [ "$ONLY_SIDS" ] ; then - SKIP=yes - for S in $ONLY_SIDS ; do - if [ "$S" = "$SID" ] ; then - SKIP= - break - fi - done - if [ "$SKIP" ] ; then continue ; fi - fi - - EXCLUDE=EXCLUDE_$SID - if [[ "$EXCLUDE" =~ ^[a-zA-Z][a-zA-Z0-9_]*$ ]] - then - # Handle explicit exclusion of instances - # but not for +ASM - EXCLUDE=${!EXCLUDE} - # SID filtered totally? - if [ "$EXCLUDE" = "ALL" ]; then - continue - fi - fi - - if [ ${SID:0:1} = '+' ] ; then - DO_ASYNC_SECTIONS=${ASYNC_ASM_SECTIONS} - DO_SYNC_SECTIONS=${SYNC_ASM_SECTIONS} - else - # switch sections to ASM - DO_SYNC_SECTIONS=${SYNC_SECTIONS} - DO_ASYNC_SECTIONS=${ASYNC_SECTIONS} - fi - - get_oraversion $SID - - # Do sync checks - EXCLUDED=$(eval 'echo $EXCLUDE'"_$SID") - SECTIONS=$(remove_excluded_sections "$DO_SYNC_SECTIONS" "$EXCLUDED") - - # Do async checks - ASECTIONS=$(remove_excluded_sections "$DO_ASYNC_SECTIONS" "$EXCLUDED") - ASYNC_SQL=$(for section in $ASECTIONS ; do eval "sql_$section" ; done) - export ASYNC_SQL - - if [ "$DEBUGCONNECT" ] ; then - echo "-----------------------------------------------" - echo "Logincheck to Instance: "$SID" Version: "$ORACLE_VERSION - echo "select 'Login ok User: ' || user || ' on ' || host_name - from v\$instance;" | sqlplus $SID - echo "SYNC_SECTIONS=$SECTIONS" - echo "ASYNC_SECTIONS=$ASECTIONS" - if [ "$IGNORE_DB_NAME" ] ; then - echo "IGNORE_DB_NAME found. Ignoring DB_NAME in all SQLs!" - fi - if [ "$DISABLE_ORA_SESSION_SETTINGS" ] ; then - echo "Paramter DISABLE_ORA_SESSION_SETTINGS found!" - fi - if [ "$HINT_RMAN" ] ; then - echo "Using HINT_RMAN for this Instance!" - fi - # do not execute any check - continue - fi - - do_sync_checks $SID "$SECTIONS" - - run_cached_local oracle_$SID $CACHE_MAXAGE do_async_checks $SID - -done diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_oracle_asm b/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_oracle_asm deleted file mode 100755 index c29b7391..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_oracle_asm +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. 
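# The plugin below only wraps "asmcmd lsdg" and refuses to run without a
# site-provided $MK_CONFDIR/asmcmd.sh. That wrapper is not part of this
# repository; a minimal sketch, assuming the Grid Infrastructure owner is
# called "grid" and uses /u01/app/grid as its home (both placeholders):
#   #!/bin/bash
#   # run asmcmd as the grid owner with ORACLE_SID/ORACLE_HOME set for ASM
#   su - grid -c "ORACLE_SID=+ASM ORACLE_HOME=/u01/app/grid /u01/app/grid/bin/asmcmd $*"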
- -# Check_MK agent plugin for monitoring ORACLE ASM - -if [ ! -e $MK_CONFDIR/asmcmd.sh ]; then - echo "$MK_CONFDIR/asmcmd.sh does not exist." >&2 - exit 1 -fi - -function asmcmd () { - $MK_CONFDIR/asmcmd.sh "$@" -} - -echo "<<>>" -asmcmd lsdg | grep -v State | grep -v "The Oracle" diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_oracle_crs b/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_oracle_crs deleted file mode 100755 index 15efd85c..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_oracle_crs +++ /dev/null @@ -1,127 +0,0 @@ -#!/bin/bash -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -# Developed by Thorsten Bruhns from OPITZ CONSULTING Deutschland GmbH - -set -f - -ocrcfgfile=/etc/oracle/ocr.loc -olrcfgfile=/etc/oracle/olr.loc -resourcefilter="^NAME=|^TYPE=|^STATE=|^TARGET=|^ENABLED=" - -# .--Functions-----------------------------------------------------------. -# | _____ _ _ | -# | | ___| _ _ __ ___| |_(_) ___ _ __ ___ | -# | | |_ | | | | '_ \ / __| __| |/ _ \| '_ \/ __| | -# | | _|| |_| | | | | (__| |_| | (_) | | | \__ \ | -# | |_| \__,_|_| |_|\___|\__|_|\___/|_| |_|___/ | -# | | -# +----------------------------------------------------------------------+ -# | | -# '----------------------------------------------------------------------' - -function set_has_env(){ - test -f ${ocrcfgfile} || exit 0 - local_has_type=$(cat $ocrcfgfile | grep "^local_only=" | cut -d"=" -f2 | tr '[:lower:]' '[:upper:]') - local_has_type=${local_has_type:-"FALSE"} - - if [ -f ${olrcfgfile} ] ; then - has_ORACLE_HOME=$(cat $olrcfgfile | grep "^crs_home=" | cut -d"=" -f2) - else - # There is no olr.cfg in 10.2 and 11.1 - # we try to get the ORA_CRS_HOME from /etc/init.d/init.cssd - local_has_type=FALSE - INITCSSD=/etc/init.d/init.cssd - if [ ! 
-f ${INITCSSD} ] ; then - exit 0 - else - has_ORACLE_HOME=$(grep "^ORA_CRS_HOME=" ${INITCSSD} | cut -d"=" -f2-) - fi - fi - - CRSCTL=${has_ORACLE_HOME}/bin/crsctl - OLSNODES=${has_ORACLE_HOME}/bin/olsnodes - CRS_STAT=${has_ORACLE_HOME}/bin/crs_stat -} - -function printhasdata() { - ps -e | grep cssd.bin > /dev/null || exit 0 - - echo "<<>>" - $CRSCTL query has releaseversion - - echo "<<>>" - $CRSCTL stat res -f | grep -E $resourcefilter -} - -function printcrsdata() { - ps -e | grep -e ohasd.bin -e crsd.bin > /dev/null || exit 0 - - echo "<<>>" - crs_version=$($CRSCTL query crs releaseversion) - echo $crs_version - - echo "<<>>" - $CRSCTL query css votedisk | grep "^ [0-9]" - - ps -e | grep crsd.bin > /dev/null || exit 0 - echo "<<>>" - OLS_NODENAME=$($OLSNODES -l) - - echo "nodename|"$OLS_NODENAME - - crs_version_short=$(echo $crs_version | cut -d"[" -f2- | cut -d"." -f-2 | sed 's/\.//') - if [ $(($crs_version_short)) -ge 112 ] ; then - $CRSCTL stat res -v -n $OLS_NODENAME -init | grep -E $resourcefilter | sed "s/^/csslocal\|/" - for nodelist in $($OLSNODES) - do - $CRSCTL stat res -v -n $nodelist | grep -E $resourcefilter | sed "s/^/$nodelist\|/" - done - else - $CRS_STAT -f -c $OLS_NODENAME | grep -E $resourcefilter | sed "s/^/$OLS_NODENAME\|/" - fi -} - -# .--Main----------------------------------------------------------------. -# | __ __ _ | -# | | \/ | __ _(_)_ __ | -# | | |\/| |/ _` | | '_ \ | -# | | | | | (_| | | | | | | -# | |_| |_|\__,_|_|_| |_| | -# | | -# +----------------------------------------------------------------------+ -# | | -# '----------------------------------------------------------------------' - -set_has_env -echo "<<>>" -echo "<<>>" -echo "<<>>" -if [ $local_has_type = 'FALSE' ] ; then - printcrsdata -else - printhasdata -fi - diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_postgres b/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_postgres deleted file mode 100755 index f981bdf7..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_postgres +++ /dev/null @@ -1,485 +0,0 @@ -#!/bin/bash -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2015 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - - -# TODO postgres_connections output format - - -# .--common funcs--------------------------------------------------------. 
-# | __ | -# | ___ ___ _ __ ___ _ __ ___ ___ _ __ / _|_ _ _ __ ___ ___ | -# | / __/ _ \| '_ ` _ \| '_ ` _ \ / _ \| '_ \ | |_| | | | '_ \ / __/ __| | -# || (_| (_) | | | | | | | | | | | (_) | | | || _| |_| | | | | (__\__ \ | -# | \___\___/|_| |_| |_|_| |_| |_|\___/|_| |_||_| \__,_|_| |_|\___|___/ | -# | | -# '----------------------------------------------------------------------' - - -function compare_version_greater_equal() { - local GREATER_ONE - GREATER_ONE=$(echo "$1 $2" | awk '{if ($1 >= $2) print $1; else print $2}') - if [ "$GREATER_ONE" == "$1" ] ; then - return 0 - else - return 1 - fi -} - - -#. -# .--section funcs-------------------------------------------------------. -# | _ _ __ | -# | ___ ___ ___| |_(_) ___ _ __ / _|_ _ _ __ ___ ___ | -# | / __|/ _ \/ __| __| |/ _ \| '_ \ | |_| | | | '_ \ / __/ __| | -# | \__ \ __/ (__| |_| | (_) | | | | | _| |_| | | | | (__\__ \ | -# | |___/\___|\___|\__|_|\___/|_| |_| |_| \__,_|_| |_|\___|___/ | -# | | -# '----------------------------------------------------------------------' - - -function postgres_instances() { - echo '<<>>' - # If we have no instances we take db id (pqsql/postgres) because - # ps output may be unreadable - # In case of instances ps output shows them readable - if [ ! -z "${1}" ]; then - echo "[[[${1}]]]" - fi - pgrep -laf bin/postgres -} - - -function postgres_sessions() { - # Postgres 9.2 uses 'query' instead of 'current_query' - local OUTPUT - OUTPUT="$(echo "\echo '<<>>${INSTANCE_SECTION}' - SELECT ( - SELECT column_name - FROM information_schema.columns - WHERE table_name='pg_stat_activity' AND column_name in ('query', 'current_query') - ) = '' as query, count(*) - FROM pg_stat_activity - GROUP BY (query = '');" |\ - su - "$DBUSER" -c "$export_PGPASSFILE $psql -X --variable ON_ERROR_STOP=1 -d $PGDATABASE ${EXTRA_ARGS} -A -t -F' '" 2>/dev/null)" - - echo "$OUTPUT" - # line with number of idle sessions is sometimes missing on Postgres 8.x. This can lead - # to an altogether empty section and thus the check disappearing. - echo "$OUTPUT" | grep -q '^t ' || echo "t 0" -} - - -function postgres_simple_queries() { - # Querytime - # Supports versions >= 8.3, > 9.1 - local QUERYTIME_QUERY - if compare_version_greater_equal "$POSTGRES_VERSION" "9.2" ; then - QUERYTIME_QUERY="SELECT datname, datid, usename, client_addr, state AS state, COALESCE(ROUND(EXTRACT(epoch FROM now()-query_start)),0) AS seconds, - pid, regexp_replace(query, E'[\\n\\r\\u2028]+', ' ', 'g' ) AS current_query FROM pg_stat_activity WHERE (query_start IS NOT NULL AND (state NOT LIKE 'idle%' OR state IS NULL)) ORDER BY query_start, pid DESC;" - else - QUERYTIME_QUERY="SELECT datname, datid, usename, client_addr, '' AS state, COALESCE(ROUND(EXTRACT(epoch FROM now()-query_start)),0) AS seconds, - procpid as pid, regexp_replace(current_query, E'[\\n\\r\\u2028]+', ' ', 'g' ) AS current_query FROM pg_stat_activity WHERE (query_start IS NOT NULL AND current_query NOT LIKE '%') ORDER BY query_start, procpid DESC;" - fi - - # Number of current connections per database - # We need to output the databases, too. 
- # This query does not report databases without an active query - local CONNECTIONS_QUERY - if compare_version_greater_equal "$POSTGRES_VERSION" "9.2" ; then - CONNECTIONS_QUERY="SELECT COUNT(datid) AS current, - (SELECT setting AS mc FROM pg_settings WHERE name = 'max_connections') AS mc, - d.datname - FROM pg_database d - LEFT JOIN pg_stat_activity s ON (s.datid = d.oid) WHERE state <> 'idle' - GROUP BY 2,3 - ORDER BY datname;" - else - CONNECTIONS_QUERY="SELECT COUNT(datid) AS current, - (SELECT setting AS mc FROM pg_settings WHERE name = 'max_connections') AS mc, - d.datname - FROM pg_database d - LEFT JOIN pg_stat_activity s ON (s.datid = d.oid) WHERE current_query <> '' - GROUP BY 2,3 - ORDER BY datname;" - fi - - echo "\pset footer off - \echo '<<>>${INSTANCE_SECTION}' - SELECT datid, datname, numbackends, xact_commit, xact_rollback, blks_read, blks_hit, tup_returned, tup_fetched, tup_inserted, tup_updated, tup_deleted, pg_database_size(datname) AS datsize FROM pg_stat_database; - - \echo '<<>>${INSTANCE_SECTION}' - \echo '[databases_start]' - $ECHO_DATABASES - \echo '[databases_end]' - SELECT datname, granted, mode FROM pg_locks l RIGHT JOIN pg_database d ON (d.oid=l.database) WHERE d.datallowconn; - - \echo '<<>>${INSTANCE_SECTION}' - \echo '[databases_start]' - $ECHO_DATABASES - \echo '[databases_end]' - $QUERYTIME_QUERY - - \echo '<<>>${INSTANCE_SECTION}' - \echo '[databases_start]' - $ECHO_DATABASES - \echo '[databases_end]' - $CONNECTIONS_QUERY" \ - | su - "$DBUSER" -c "$export_PGPASSFILE $psql -X -d $PGDATABASE ${EXTRA_ARGS} -q -A -F';'" -} - - -function postgres_stats() { - # Contains last vacuum time and analyze time - local LASTVACUUM="SELECT current_database() AS datname, nspname AS sname, relname AS tname, - CASE WHEN v IS NULL THEN -1 ELSE round(extract(epoch FROM v)) END AS vtime, - CASE WHEN g IS NULL THEN -1 ELSE round(extract(epoch FROM v)) END AS atime - FROM (SELECT nspname, relname, GREATEST(pg_stat_get_last_vacuum_time(c.oid), pg_stat_get_last_autovacuum_time(c.oid)) AS v, - GREATEST(pg_stat_get_last_analyze_time(c.oid), pg_stat_get_last_autoanalyze_time(c.oid)) AS g - FROM pg_class c, pg_namespace n - WHERE relkind = 'r' AND n.oid = c.relnamespace AND n.nspname <> 'information_schema' - ORDER BY 3) AS foo;" - - local FIRST= - local QUERY="\pset footer off - BEGIN; - SET statement_timeout=30000; - COMMIT; - - \echo '<<>>${INSTANCE_SECTION}' - \echo '[databases_start]' - $ECHO_DATABASES - \echo '[databases_end]'" - - for db in $DATABASES ; do - QUERY="$QUERY - \c $db - $LASTVACUUM - " - if [ -z $FIRST ] ; then - FIRST=false - QUERY="$QUERY - \pset tuples_only on - " - fi - done - echo "$QUERY" | su - "$DBUSER" -c "$export_PGPASSFILE $psql -X ${EXTRA_ARGS} -q -A -F';'" | grep -v -e 'COMMIT$' -e 'SET$' -e 'BEGIN$' -} - - -function postgres_version() { - # Postgres version an connection time - echo -e "<<>>${INSTANCE_SECTION}" - (TIMEFORMAT='%3R'; time echo "SELECT version() AS v" |\ - su - "$DBUSER" -c "$export_PGPASSFILE $psql -X -d $PGDATABASE ${EXTRA_ARGS} -t -A -F';'; echo -e '<<>>${INSTANCE_SECTION}'") 2>&1 -} - - -function postgres_bloat() { - # Bloat index and tables - # Supports versions <9.0, >=9.0 - # This huge query has been gratefully taken from Greg Sabino Mullane's check_postgres.pl - local BLOAT_QUERY - if compare_version_greater_equal "$POSTGRES_VERSION" "9.0" ; then - BLOAT_QUERY="SELECT - current_database() AS db, schemaname, tablename, reltuples::bigint AS tups, relpages::bigint AS pages, otta, - ROUND(CASE WHEN otta=0 OR sml.relpages=0 OR 
sml.relpages=otta THEN 0.0 ELSE sml.relpages/otta::numeric END,1) AS tbloat, - CASE WHEN relpages < otta THEN 0 ELSE relpages::bigint - otta END AS wastedpages, - CASE WHEN relpages < otta THEN 0 ELSE bs*(sml.relpages-otta)::bigint END AS wastedbytes, - CASE WHEN relpages < otta THEN 0 ELSE (bs*(relpages-otta))::bigint END AS wastedsize, - iname, ituples::bigint AS itups, ipages::bigint AS ipages, iotta, - ROUND(CASE WHEN iotta=0 OR ipages=0 OR ipages=iotta THEN 0.0 ELSE ipages/iotta::numeric END,1) AS ibloat, - CASE WHEN ipages < iotta THEN 0 ELSE ipages::bigint - iotta END AS wastedipages, - CASE WHEN ipages < iotta THEN 0 ELSE bs*(ipages-iotta) END AS wastedibytes, - CASE WHEN ipages < iotta THEN 0 ELSE (bs*(ipages-iotta))::bigint END AS wastedisize, - CASE WHEN relpages < otta THEN - CASE WHEN ipages < iotta THEN 0 ELSE bs*(ipages-iotta::bigint) END - ELSE CASE WHEN ipages < iotta THEN bs*(relpages-otta::bigint) - ELSE bs*(relpages-otta::bigint + ipages-iotta::bigint) END - END AS totalwastedbytes - FROM ( - SELECT - nn.nspname AS schemaname, - cc.relname AS tablename, - COALESCE(cc.reltuples,0) AS reltuples, - COALESCE(cc.relpages,0) AS relpages, - COALESCE(bs,0) AS bs, - COALESCE(CEIL((cc.reltuples*((datahdr+ma- - (CASE WHEN datahdr%ma=0 THEN ma ELSE datahdr%ma END))+nullhdr2+4))/(bs-20::float)),0) AS otta, - COALESCE(c2.relname,'?') AS iname, COALESCE(c2.reltuples,0) AS ituples, COALESCE(c2.relpages,0) AS ipages, - COALESCE(CEIL((c2.reltuples*(datahdr-12))/(bs-20::float)),0) AS iotta -- very rough approximation, assumes all cols - FROM - pg_class cc - JOIN pg_namespace nn ON cc.relnamespace = nn.oid AND nn.nspname <> 'information_schema' - LEFT JOIN - ( - SELECT - ma,bs,foo.nspname,foo.relname, - (datawidth+(hdr+ma-(case when hdr%ma=0 THEN ma ELSE hdr%ma END)))::numeric AS datahdr, - (maxfracsum*(nullhdr+ma-(case when nullhdr%ma=0 THEN ma ELSE nullhdr%ma END))) AS nullhdr2 - FROM ( - SELECT - ns.nspname, tbl.relname, hdr, ma, bs, - SUM((1-coalesce(null_frac,0))*coalesce(avg_width, 2048)) AS datawidth, - MAX(coalesce(null_frac,0)) AS maxfracsum, - hdr+( - SELECT 1+count(*)/8 - FROM pg_stats s2 - WHERE null_frac<>0 AND s2.schemaname = ns.nspname AND s2.tablename = tbl.relname - ) AS nullhdr - FROM pg_attribute att - JOIN pg_class tbl ON att.attrelid = tbl.oid - JOIN pg_namespace ns ON ns.oid = tbl.relnamespace - LEFT JOIN pg_stats s ON s.schemaname=ns.nspname - AND s.tablename = tbl.relname - AND s.inherited=false - AND s.attname=att.attname, - ( - SELECT - (SELECT current_setting('block_size')::numeric) AS bs, - CASE WHEN SUBSTRING(SPLIT_PART(v, ' ', 2) FROM '#\[0-9]+.[0-9]+#\%' for '#') - IN ('8.0','8.1','8.2') THEN 27 ELSE 23 END AS hdr, - CASE WHEN v ~ 'mingw32' OR v ~ '64-bit' THEN 8 ELSE 4 END AS ma - FROM (SELECT version() AS v) AS foo - ) AS constants - WHERE att.attnum > 0 AND tbl.relkind='r' - GROUP BY 1,2,3,4,5 - ) AS foo - ) AS rs - ON cc.relname = rs.relname AND nn.nspname = rs.nspname - LEFT JOIN pg_index i ON indrelid = cc.oid - LEFT JOIN pg_class c2 ON c2.oid = i.indexrelid - ) AS sml - WHERE sml.relpages - otta > 0 OR ipages - iotta > 10 ORDER BY totalwastedbytes DESC LIMIT 10;" - else - BLOAT_QUERY="SELECT - current_database() AS db, schemaname, tablename, reltuples::bigint AS tups, relpages::bigint AS pages, otta, - ROUND(CASE WHEN otta=0 OR sml.relpages=0 OR sml.relpages=otta THEN 0.0 ELSE sml.relpages/otta::numeric END,1) AS tbloat, - CASE WHEN relpages < otta THEN 0 ELSE relpages::bigint - otta END AS wastedpages, - CASE WHEN relpages < otta THEN 0 ELSE 
bs*(sml.relpages-otta)::bigint END AS wastedbytes, - CASE WHEN relpages < otta THEN '0 bytes'::text ELSE (bs*(relpages-otta))::bigint || ' bytes' END AS wastedsize, - iname, ituples::bigint AS itups, ipages::bigint AS ipages, iotta, - ROUND(CASE WHEN iotta=0 OR ipages=0 OR ipages=iotta THEN 0.0 ELSE ipages/iotta::numeric END,1) AS ibloat, - CASE WHEN ipages < iotta THEN 0 ELSE ipages::bigint - iotta END AS wastedipages, - CASE WHEN ipages < iotta THEN 0 ELSE bs*(ipages-iotta) END AS wastedibytes, - CASE WHEN ipages < iotta THEN '0 bytes' ELSE (bs*(ipages-iotta))::bigint || ' bytes' END AS wastedisize, - CASE WHEN relpages < otta THEN - CASE WHEN ipages < iotta THEN 0 ELSE bs*(ipages-iotta::bigint) END - ELSE CASE WHEN ipages < iotta THEN bs*(relpages-otta::bigint) - ELSE bs*(relpages-otta::bigint + ipages-iotta::bigint) END - END AS totalwastedbytes - FROM ( - SELECT - nn.nspname AS schemaname, - cc.relname AS tablename, - COALESCE(cc.reltuples,0) AS reltuples, - COALESCE(cc.relpages,0) AS relpages, - COALESCE(bs,0) AS bs, - COALESCE(CEIL((cc.reltuples*((datahdr+ma- - (CASE WHEN datahdr%ma=0 THEN ma ELSE datahdr%ma END))+nullhdr2+4))/(bs-20::float)),0) AS otta, - COALESCE(c2.relname,'?') AS iname, COALESCE(c2.reltuples,0) AS ituples, COALESCE(c2.relpages,0) AS ipages, - COALESCE(CEIL((c2.reltuples*(datahdr-12))/(bs-20::float)),0) AS iotta -- very rough approximation, assumes all cols - FROM - pg_class cc - JOIN pg_namespace nn ON cc.relnamespace = nn.oid AND nn.nspname <> 'information_schema' - LEFT JOIN - ( - SELECT - ma,bs,foo.nspname,foo.relname, - (datawidth+(hdr+ma-(case when hdr%ma=0 THEN ma ELSE hdr%ma END)))::numeric AS datahdr, - (maxfracsum*(nullhdr+ma-(case when nullhdr%ma=0 THEN ma ELSE nullhdr%ma END))) AS nullhdr2 - FROM ( - SELECT - ns.nspname, tbl.relname, hdr, ma, bs, - SUM((1-coalesce(null_frac,0))*coalesce(avg_width, 2048)) AS datawidth, - MAX(coalesce(null_frac,0)) AS maxfracsum, - hdr+( - SELECT 1+count(*)/8 - FROM pg_stats s2 - WHERE null_frac<>0 AND s2.schemaname = ns.nspname AND s2.tablename = tbl.relname - ) AS nullhdr - FROM pg_attribute att - JOIN pg_class tbl ON att.attrelid = tbl.oid - JOIN pg_namespace ns ON ns.oid = tbl.relnamespace - LEFT JOIN pg_stats s ON s.schemaname=ns.nspname - AND s.tablename = tbl.relname - AND s.attname=att.attname, - ( - SELECT - (SELECT current_setting('block_size')::numeric) AS bs, - CASE WHEN SUBSTRING(SPLIT_PART(v, ' ', 2) FROM '#\"[0-9]+.[0-9]+#\"%' for '#') - IN ('8.0','8.1','8.2') THEN 27 ELSE 23 END AS hdr, - CASE WHEN v ~ 'mingw32' OR v ~ '64-bit' THEN 8 ELSE 4 END AS ma - FROM (SELECT version() AS v) AS foo - ) AS constants - WHERE att.attnum > 0 AND tbl.relkind='r' - GROUP BY 1,2,3,4,5 - ) AS foo - ) AS rs - ON cc.relname = rs.relname AND nn.nspname = rs.nspname - LEFT JOIN pg_index i ON indrelid = cc.oid - LEFT JOIN pg_class c2 ON c2.oid = i.indexrelid - ) AS sml - WHERE sml.relpages - otta > 0 OR ipages - iotta > 10 ORDER BY totalwastedbytes DESC LIMIT 10;" - fi - - local FIRST= - local QUERY="\pset footer off - \echo '<<>>${INSTANCE_SECTION}' - \echo '[databases_start]' - $ECHO_DATABASES - \echo '[databases_end]'" - - for db in $DATABASES ; do - QUERY="$QUERY - \c $db - $BLOAT_QUERY - " - if [ -z $FIRST ] ; then - FIRST=false - QUERY="$QUERY - \pset tuples_only on - " - fi - done - echo "$QUERY" | su - "$DBUSER" -c "$export_PGPASSFILE $psql -X ${EXTRA_ARGS} -q -A -F';'" -} - - -#. -# .--main----------------------------------------------------------------. 
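Both postgres_stats and postgres_bloat above assemble a single psql batch with the same per-database loop. A sketch of what that loop produces, assuming two hypothetical databases (db1, db2) and a placeholder in place of the real section header and $BLOAT_QUERY; the plugin then pipes the result through su into psql:

#!/bin/bash
# Sketch only: hypothetical databases and a placeholder query; the real plugin
# substitutes its own section header and the full bloat SQL.
DATABASES="db1
db2"
BLOAT_QUERY="SELECT 'placeholder query';"
FIRST=
QUERY="\pset footer off
\echo '<<<SECTION>>>'
\echo '[databases_start]'
\echo db1
\echo db2
\echo '[databases_end]'"
for db in $DATABASES ; do
    QUERY="$QUERY
\c $db
$BLOAT_QUERY
"
    # after the first database, switch to tuples-only so later headers are suppressed
    if [ -z "$FIRST" ] ; then
        FIRST=false
        QUERY="$QUERY
\pset tuples_only on
"
    fi
done
echo "$QUERY"   # the plugin: echo "$QUERY" | su - "$DBUSER" -c "... psql -X -q -A -F';'"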
-# | _ | -# | _ __ ___ __ _(_)_ __ | -# | | '_ ` _ \ / _` | | '_ \ | -# | | | | | | | (_| | | | | | | -# | |_| |_| |_|\__,_|_|_| |_| | -# | | -# '----------------------------------------------------------------------' - - -### postgres.cfg ## -# DBUSER=OS_USER_NAME -# INSTANCE=/home/postgres/db1.env:USER_NAME:/PATH/TO/.pgpass -# INSTANCE=/home/postgres/db2.env:USER_NAME:/PATH/TO/.pgpass - -# TODO @dba USERNAME in .pgpass ? -# INSTANCE=/home/postgres/db2.env:/PATH/TO/.pgpass - - -function postgres_main() { - if [ -z "$DBUSER" ] || [ -z "$PGDATABASE" ] ; then - exit 0 - fi - - EXTRA_ARGS="" - if [ ! -z "$PGUSER" ]; then - EXTRA_ARGS=$EXTRA_ARGS" -U $PGUSER" - fi - if [ ! -z "$PGPORT" ]; then - EXTRA_ARGS=$EXTRA_ARGS" -p $PGPORT" - fi - - if [ ! -z "$PGPASSFILE" ]; then - export_PGPASSFILE="export PGPASSFILE=$PGPASSFILE; " - fi - - DATABASES="$(echo "SELECT datname FROM pg_database WHERE datistemplate = false;" |\ - su - "$DBUSER" -c "$export_PGPASSFILE $psql -X -d $PGDATABASE ${EXTRA_ARGS} -t -A -F';'")" - ECHO_DATABASES="$(echo "$DATABASES" | sed 's/^/\\echo /')" - - POSTGRES_VERSION=$(su - "$DBUSER" -c "$psql -X -V -d $PGDATABASE ${EXTRA_ARGS} | egrep -o '[0-9]{1,}\.[0-9]{1,}'") - - postgres_sessions - postgres_simple_queries - postgres_stats - postgres_version - postgres_bloat -} - - -MK_CONFFILE=$MK_CONFDIR/postgres.cfg -if [ -e "$MK_CONFFILE" ]; then - - postgres_instances - - DBUSER=$(grep DBUSER "$MK_CONFFILE" | sed 's/.*=//g') - cat "$MK_CONFFILE" | while read line - do - case $line in - INSTANCE*) - instance=$line - ;; - *) - instance= - ;; - esac - - if [ ! -z "$instance" ]; then - instance_path=$(echo "$instance" | sed 's/.*=\(.*\):.*:.*$/\1/g') - instance_name=$(echo "$instance_path" | sed -e 's/.*\/\(.*\)/\1/g' -e 's/\.env$//g') - if [ ! -z "$instance_name" ]; then - INSTANCE_SECTION="\n[[[$instance_name]]]" - else - INSTANCE_SECTION="" - fi - - psql="/$DBUSER/$(grep "^export PGVERSION=" "$instance_path" | - sed -e 's/.*=//g' -e 's/\s*#.*$//g')/bin/psql" - - PGUSER=$(echo "$instance" | sed 's/.*=.*:\(.*\):.*$/\1/g') - PGPASSFILE="$(echo "$instance" | sed 's/.*=.*:.*:\(.*\)$/\1/g')" - PGDATABASE=$(grep "^export PGDATABASE=" "$instance_path" | - sed -e 's/.*=//g' -e 's/\s*#.*$//g') - PGPORT=$(grep "^export PGPORT=" "$instance_path" | - sed -e 's/.*=//g' -e 's/\s*#.*$//g') - - # Fallback - if [ ! -f "$psql" ]; then - psql="$(cat $instance_path | grep "^export PGHOME=" | - sed -e 's/.*=//g' -e 's/\s*#.*$//g')/psql" - fi - - postgres_main - - fi - done - -else - - if id pgsql >/dev/null 2>&1; then - DBUSER=pgsql - elif id postgres >/dev/null 2>&1; then - DBUSER=postgres - else - exit 0 - fi - INSTANCE_SECTION="" - - postgres_instances "$DBUSER" - - psql="psql" - PGDATABASE=postgres - postgres_main - -fi diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_sap b/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_sap deleted file mode 100755 index b64a4453..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_sap +++ /dev/null @@ -1,504 +0,0 @@ -#!/usr/bin/python -# -*- encoding: utf-8; py-indent-offset: 4 -*- -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . 
\ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -# This agent plugin has been built to collect information from SAP R/3 systems -# using RFC calls. It needs the python module sapnwrfc (available in Check_MK -# git at agents/sap/sapnwrfc) and the nwrfcsdk (can be downloaded from SAP -# download portal) installed to be working. You can configure the agent plugin -# using the configuration file /etc/check_mk/sap.cfg (a sample file can be -# found in Check_MK git at agents/sap/sap.cfg) to tell it how to connect to -# your SAP instance and which values you want to fetch from your system to be -# forwarded to and checked by Check_MK. -# -# This current agent has been developed and tested with: -# python-sapnwrfc-0.19 -# -# During development the "CCMS_Doku.pdf" was really helpful. - -import os, sys, fcntl -import time, datetime - -# sapnwrfc needs to know where the libs are located. During -# development the import failed, since the module did not -# find the libraries. So we preload the library to have it -# already loaded. -try: - import sapnwrfc -except ImportError, e: - if 'sapnwrfc.so' in str(e): - sys.stderr.write( - 'Unable to find the library sapnwrfc.so. Maybe you need to put a file pointing to\n' - 'the sapnwrfc library directory into the /etc/ld.so.conf.d directory. For example\n' - 'create the file /etc/ld.so.conf.d/sapnwrfc.conf containing the path\n' - '"/usr/sap/nwrfcsdk/lib" and run "ldconfig" afterwards.\n' - ) - sys.exit(1) - elif 'No module named sapnwrfc' in str(e): - sys.stderr.write("Missing the Python module sapnwfrc.\n") - sys.exit(1) - else: - raise - -# ############################################################################# - -# This sign is used to separate the path parts given in the config -SEPARATOR = '/' - -# This are the different classes of monitoring objects which -# can be found in the tree. -# -# Summarizs information from several subnodes -MTE_SUMMARY = '050' -# A monitoring object which has several subnodes which lead to the status -# of this object. 
For example it is the "CPU" object on a host -MTE_MON_OBJ = '070' -# Contains performance information (which can be used to create graphs from) -MTE_PERFORMANCE = '100' -# Might contain several messages -MTE_MSG_CONTAINER = '101' -# Contains a single status message -MTE_SINGLE_MSG = '102' -# This is a long text label without status -MTE_LONG_TXT = '110' -# This is a short text label without status -MTE_SHORT_TXT = '111' -# Is a "folder" which has no own state, just computed by its childs -MTE_VIRTUAL = '199' - -# This map converts between the SAP color codes (key values) and the -# nagios state codes and strings -STATE_VALUE_MAP = { - 0: (0, 'OK'), # GRAY (inactive or no current info available) -> OK - 1: (0, 'OK'), # GREEN -> OK - 2: (1, 'WARN'), # YELLOW -> WARNING - 3: (2, 'CRIT'), # RED -> CRITICAL -} - -STATE_LOGWATCH_MAP = [ 'O', 'O', 'W', 'C' ] - -# Monitoring objects of these classes are skipped during processing -SKIP_MTCLASSES = [ - MTE_VIRTUAL, - MTE_SUMMARY, - MTE_MON_OBJ, - MTE_SHORT_TXT, - MTE_LONG_TXT, -] - -MK_CONFDIR = os.getenv("MK_CONFDIR") or "/etc/check_mk" -MK_VARDIR = os.getenv("MK_VARDIR") or "/var/lib/check_mk_agent" - -STATE_FILE = MK_VARDIR + '/sap.state' -state_file_changed = False - -# ############################################################################# - -# Settings to be used to connect to the SAP R/3 host. -local_cfg = { - 'ashost': 'localhost', - 'sysnr': '00', - 'client': '100', - 'user': '', - 'passwd': '', - 'trace': '3', - 'loglevel': 'warn', - #'lang': 'EN', -} - -# A list of strings, while the string must match the full path to one or -# several monitor objects. We use unix shell patterns during matching, so -# you can use several chars as placeholders: -# -# * matches everything -# ? matches any single character -# [seq] matches any character in seq -# [!seq] matches any character not in seq -# -# The * matches the whole following string and does not end on next "/". -# For examples, take a look at the default config file (/etc/check_mk/sap.cfg). -monitor_paths = [ - 'SAP CCMS Monitor Templates/Dialog Overview/*', -] -monitor_types = [] -config_file = MK_CONFDIR + '/sap.cfg' - -cfg = {} -if os.path.exists(config_file): - execfile(config_file) - if type(cfg) == dict: - cfg = [ cfg ] -else: - cfg = [ local_cfg ] - -# Load the state file into memory -try: - states = eval(file(STATE_FILE).read()) -except IOError: - states = {} - -# index of all logfiles which have been found in a run. This is used to -# remove logfiles which are not available anymore from the states dict. 
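The monitor_paths entries above are matched with unix shell patterns (fnmatch), so a bash case statement reproduces their effect closely enough for illustration; the candidate monitor paths below are invented for the example:

#!/bin/bash
# Sketch only: demonstrates how the default rule above selects MTE paths.
# The candidate paths are made-up examples, not real SAP monitor entries.
rule='SAP CCMS Monitor Templates/Dialog Overview/*'
for path in \
    'SAP CCMS Monitor Templates/Dialog Overview/Dialog Response Time' \
    'SAP CCMS Monitor Templates/Dialog Overview/Users Logged In/Total' \
    'SAP CCMS Monitor Templates/Background Processing/Aborted Jobs'
do
    case "$path" in
        # like fnmatch, the unquoted pattern's * also crosses "/" boundaries
        $rule) echo "monitored: $path" ;;
        *)     echo "skipped:   $path" ;;
    esac
done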
-logfiles = [] - -# ############################################################################# - -# -# HELPERS -# - -import fnmatch - -def to_be_monitored(path, toplevel_match = False): - for rule in monitor_paths: - if toplevel_match and rule.count('/') > 1: - rule = '/'.join(rule.split('/')[:2]) - - if fnmatch.fnmatch(path, rule): - return True - return False - -def node_path(tree, node, path = ''): - if path: - path = node['MTNAMESHRT'].rstrip() + SEPARATOR + path - else: - path = node['MTNAMESHRT'].rstrip() - - if node['ALPARINTRE'] > 0: - parent_node = tree[node['ALPARINTRE'] - 1] - return node_path(tree, parent_node, path) - return path - -# -# API ACCESS FUNCTIONS -# - -def query(what, params, debug = False): - fd = conn.discover(what) - - if debug: - sys.stdout.write("Name: %s Params: %s\n" % (fd.name, fd.handle.parameters)) - sys.stdout.write("Given-Params: %s\n" % params) - - f = fd.create_function_call() - for key, val in params.items(): - getattr(f, key)(val) - f.invoke() - - ret = f.RETURN.value - if ret['TYPE'] == 'E': - sys.stderr.write("ERROR: %s\n" % ret['MESSAGE'].strip()) - - return f - -def login(): - f = query('BAPI_XMI_LOGON', { - 'EXTCOMPANY': 'Mathias Kettner GmbH', - 'EXTPRODUCT': 'Check_MK SAP Agent', - 'INTERFACE': 'XAL', - 'VERSION': '1.0', - }) - #sys.stdout.write("%s\n" % f.RETURN) - return f.SESSIONID.value - -def logout(): - query('BAPI_XMI_LOGOFF', { - 'INTERFACE': 'XAL', - }) - -def mon_list(cfg): - f = query("BAPI_SYSTEM_MON_GETLIST", { - 'EXTERNAL_USER_NAME': cfg['user'], - }) - l = [] - for mon in f.MONITOR_NAMES.value: - l.append((mon["MS_NAME"].rstrip(), mon["MONI_NAME"].rstrip())) - return l - -#def ms_list( cfg ): -# f = query("BAPI_SYSTEM_MS_GETLIST", { -# 'EXTERNAL_USER_NAME': cfg['user'], -# }) -# l = [] -# for ms in f.MONITOR_SETS.value: -# l.append(ms['NAME'].rstrip()) -# return l - -def mon_tree(cfg, ms_name, mon_name): - f = query("BAPI_SYSTEM_MON_GETTREE", { - 'EXTERNAL_USER_NAME': cfg['user'], - 'MONITOR_NAME': {"MS_NAME": ms_name, "MONI_NAME": mon_name}, - }) - tree = f.TREE_NODES.value - for node in tree: - node['PATH'] = ms_name + SEPARATOR + node_path(tree, node) - return tree - -def tid(node): - return { - 'MTSYSID': node['MTSYSID'].strip(), - 'MTMCNAME': node['MTMCNAME'].strip(), - 'MTNUMRANGE': node['MTNUMRANGE'].strip(), - 'MTUID': node['MTUID'].strip(), - 'MTCLASS': node['MTCLASS'].strip(), - 'MTINDEX': node['MTINDEX'].strip(), - 'EXTINDEX': node['EXTINDEX'].strip(), - } - -def mon_perfdata(cfg, node): - f = query('BAPI_SYSTEM_MTE_GETPERFCURVAL', { - 'EXTERNAL_USER_NAME': cfg['user'], - 'TID': tid(node), - }) - value = f.CURRENT_VALUE.value['LASTPERVAL'] - - f = query('BAPI_SYSTEM_MTE_GETPERFPROP', { - 'EXTERNAL_USER_NAME': cfg['user'], - 'TID': tid(node), - }) - if f.PROPERTIES.value['DECIMALS'] != 0: - value = (value + 0.0) / 10**f.PROPERTIES.value['DECIMALS'] - uom = f.PROPERTIES.value['VALUNIT'].strip() - - return value, uom - -def mon_msg(cfg, node): - f = query('BAPI_SYSTEM_MTE_GETSMVALUE', { - 'EXTERNAL_USER_NAME': cfg['user'], - 'TID': tid(node), - }) - data = f.VALUE.value - dt = parse_dt(data['SMSGDATE'], data['SMSGTIME']) - return (dt, data['MSG'].strip()) - -def parse_dt(d, t): - d = d.strip() - t = t.strip() - if not d or not t: - return None - else: - return datetime.datetime(*time.strptime(d + t, '%Y%m%d%H%M%S')[:6]) - -def mon_alerts(cfg, node): - f = query('BAPI_SYSTEM_MTE_GETALERTS', { - 'EXTERNAL_USER_NAME': cfg['user'], - 'TID': tid(node), - }) - return f.ALERTS.value - -def aid(alert): - return { - 
"ALSYSID": alert["ALSYSID"], - "MSEGNAME": alert["MSEGNAME"], - "ALUNIQNUM": alert["ALUNIQNUM"], - "ALINDEX": alert["ALINDEX"], - "ALERTDATE": alert["ALERTDATE"], - "ALERTTIME": alert["ALERTTIME"], - } - -def alert_details(cfg, alert): - f = query('BAPI_SYSTEM_ALERT_GETDETAILS', { - 'EXTERNAL_USER_NAME': cfg['user'], - 'AID': aid(alert), - }) - #prop = f.PROPERTIES.value - state = f.VALUE.value - msg = f.XMI_EXT_MSG.value['MSG'].strip() - return state, msg - -def process_alerts(cfg, logs, ms_name, mon_name, node, alerts): - global state_file_changed - - sid = node["MTSYSID"].strip() or 'Other' - context = node["MTMCNAME"].strip() or 'Other' - path = node["PATH"] - - # Use the sid as hostname for the logs - hostname = sid - logfile = context + "/" + path - - logfiles.append((hostname, logfile)) - - logs.setdefault(sid, {}) - logs[hostname][logfile] = [] - newest_log_dt = None - for alert in alerts: - dt = parse_dt(alert['ALERTDATE'], alert['ALERTTIME']) - - if (hostname, logfile) in states and states[(hostname, logfile)] >= dt: - continue # skip log messages which are older than the last cached date - - if not newest_log_dt or dt > newest_log_dt: - newest_log_dt = dt # store the newest log of this run - - alert_state, alert_msg = alert_details(cfg, alert) - # Format lines to "logwatch" format - logs[hostname][logfile].append('%s %s %s' % (STATE_LOGWATCH_MAP[alert_state['VALUE']], - dt.strftime("%Y-%m-%d %H:%M:%S"), alert_msg)) - - if newest_log_dt: - # Write newest log age to cache to prevent double processing of logs - states[(hostname, logfile)] = newest_log_dt - state_file_changed = True - return logs - - - -def check(cfg): - global conn - conn = sapnwrfc.base.rfc_connect(cfg) - login() - - logs = {} - sap_data = {} - - # This loop is used to collect all information from SAP - for ms_name, mon_name in mon_list(cfg): - path = ms_name + SEPARATOR + mon_name - if not to_be_monitored(path, True): - continue - - tree = mon_tree(cfg, ms_name, mon_name) - for node in tree: - if not to_be_monitored(node['PATH']): - continue - #sys.stdout.write("%s\n" % node["PATH"]) - - status_details = '' - perfvalue = '-' - uom = '-' - - # Use precalculated states - state = { - 'VALUE': node['ACTUALVAL'], - 'SEVERITY': node['ACTUALSEV'], - } - - if state['VALUE'] not in STATE_VALUE_MAP: - sys.stdout.write('UNHANDLED STATE VALUE\n') - sys.exit(1) - - # - # Handle different object classes individually - # to get details about them - # - - if monitor_types and node['MTCLASS'] not in monitor_types: - continue # Skip unwanted classes if class filtering is enabled - - if node['MTCLASS'] == MTE_PERFORMANCE: - perfvalue, this_uom = mon_perfdata(cfg, node) - uom = this_uom and this_uom or uom - - elif node['MTCLASS'] == MTE_SINGLE_MSG: - status_details = "%s: %s" % mon_msg(cfg, node) - - elif node['MTCLASS'] == MTE_MSG_CONTAINER: - - alerts = mon_alerts(cfg, node) - logs = process_alerts(cfg, logs, ms_name, mon_name, node, alerts) - if len(alerts) > 0: - last_alert = alerts[-1] - dt = parse_dt(last_alert["ALERTDATE"], last_alert["ALERTTIME"]) - alert_state, alert_msg = alert_details(cfg, last_alert) - last_msg = '%s: %s - %s' % (dt, STATE_VALUE_MAP[alert_state['VALUE']][1], alert_msg) - - status_details = '%d Messages, Last: %s' % (len(alerts), last_msg) - else: - status_details = 'The log is empty' - - elif node['MTCLASS'] not in SKIP_MTCLASSES: - # Add an error to output on unhandled classes - status_details = "UNHANDLED MTCLASS", node['MTCLASS'] - - if node['MTCLASS'] not in SKIP_MTCLASSES: - sid = 
node["MTSYSID"].strip() or 'Other' - context = node["MTMCNAME"].strip() or 'Other' - path = node["PATH"] - - sap_data.setdefault(sid, []) - sap_data[sid].append("%s\t%d\t%3d\t%s\t%s\t%s\t%s" % (context, state['VALUE'], - state['SEVERITY'], path, perfvalue, uom, status_details)) - - - for host, host_sap in sap_data.items(): - sys.stdout.write('<<<<%s>>>>\n' % host) - sys.stdout.write('<<>>\n') - sys.stdout.write('%s\n' % '\n'.join(host_sap)) - sys.stdout.write('<<<<>>>>\n') - - for host, host_logs in logs.items(): - sys.stdout.write('<<<<%s>>>>\n' % host) - sys.stdout.write('<<>>\n') - for log, lines in host_logs.items(): - sys.stdout.write('[[[%s]]]\n' % log) - if lines: - sys.stdout.write('\n'.join(lines) + '\n') - sys.stdout.write('<<<<>>>>\n') - - logout() - conn.close() - -# It is possible to configure multiple SAP instances to monitor. Loop them all, but -# do not terminate when one connection failed -processed_all = True -try: - for entry in cfg: - try: - check(entry) - sys.stdout.write('<<>>\n%s\tOK\n' % entry['ashost']) - except sapnwrfc.RFCCommunicationError, e: - sys.stderr.write('ERROR: Unable to connect (%s)\n' % e) - sys.stdout.write('<<>>\n%s\tUnable to connect (%s)\n' %\ - (entry['ashost'], e)) - processed_all = False - except Exception, e: - sys.stderr.write('ERROR: Unhandled exception (%s)\n' % e) - sys.stdout.write('<<>>\n%s\tUnhandled exception (%s)\n' %\ - (entry['ashost'], e)) - processed_all = False - - # Now check whether or not an old logfile needs to be removed. This can only - # be done this way, when all hosts have been reached. Otherwise the cleanup - # is skipped. - if processed_all: - for key in states.keys(): - if key not in logfiles: - state_file_changed = True - del states[key] - - # Only write the state file once per run. And only when it has been changed - if state_file_changed: - new_file = STATE_FILE + '.new' - fd = os.open(new_file, os.O_WRONLY | os.O_CREAT) - fcntl.flock(fd, fcntl.LOCK_EX) - os.write(fd, repr(states)) - os.close(fd) - os.rename(STATE_FILE+'.new', STATE_FILE) - -except Exception, e: - sys.stderr.write('ERROR: Unhandled exception (%s)\n' % e) - -sys.exit(0) diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_sap.aix b/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_sap.aix deleted file mode 100755 index bbfa0722..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_sap.aix +++ /dev/null @@ -1,78 +0,0 @@ -#!/bin/bash -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. 
If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -# cat sapservices -##!/bin/sh -#LIBPATH=/usr/sap/C01/DVEBMGS25/exe:$LIBPATH; export LIBPATH; /usr/sap/C01/DVEBMGS25/exe/sapstartsrv pf=/usr/sap/C01/SYS/profile/START_DVEBMGS25_sap10c1 -D -u c01adm -#LIBPATH=/usr/sap/DC1/SMDA98/exe:$LIBPATH; export LIBPATH; /usr/sap/DC1/SMDA98/exe/sapstartsrv pf=/usr/sap/DC1/SYS/profile/DC1_SMDA98_sap10c1 -D -u dc1adm -#LIBPATH=/usr/sap/C02/DVEBMGS37/exe:$LIBPATH; export LIBPATH; /usr/sap/C02/DVEBMGS37/exe/sapstartsrv pf=/usr/sap/C02/SYS/profile/START_DVEBMGS37_sap10c1 -D -u c02adm -#LIBPATH=/usr/sap/DAA/SMDA97/exe:$LIBPATH; export LIBPATH; /usr/sap/DAA/SMDA97/exe/sapstartsrv pf=/usr/sap/DAA/SYS/profile/DAA_SMDA97_sap10c1 -D -u daaadm -#LIBPATH=/usr/sap/DDB/SMDA96/exe:$LIBPATH; export LIBPATH; /usr/sap/DDB/SMDA96/exe/sapstartsrv pf=/usr/sap/DDB/SYS/profile/DDB_SMDA96_sap10c1d -D -u ddbadm - -# <<>> -# [69] -# 05.06.2015 05:44:36 -# GetProcessList -# OK -# name, description, dispstatus, textstatus, starttime, elapsedtime, pid -# msg_server, MessageServer, GREEN, Running, 2015 06 01 02:28:51, 99:15:45, 17563810 -# enserver, EnqueueServer, GREEN, Running, 2015 06 01 02:28:51, 99:15:45, 15466710 -# gwrd, Gateway, GREEN, Running, 2015 06 01 02:28:51, 99:15:45, 25428046 -# [68] -# 05.06.2015 05:44:36 -# GetProcessList -# OK -# name, description, dispstatus, textstatus, starttime, elapsedtime, pid -# jstart, J2EE Server, GREEN, All processes running, 2015 06 01 02:29:06, 99:15:30, 18087996 -# igswd_mt, IGS Watchdog, GREEN, Running, 2015 06 01 02:29:06, 99:15:30, 9765042 - -# -# <<>> -# GetProcessList -# FAIL: NIECONN_REFUSED (Connection refused), NiRawConnect failed in plugin_fopen() - -sapservices="/usr/sap/sapservices" - -if [ -r "$sapservices" ]; then - echo "<<>>" - # loop over ids - cat $sapservices | while read LINE - do - command=$(echo $LINE | grep "^LIBPATH=/usr/sap/" | grep -v "^LIBPATH=/usr/sap/D" | awk -F" " '{print $5}') - if [ -n "$command" ]; then - id2=$(echo $command | awk -F"/" '{print $4}') - path=$(echo "/sapmnt/$id2/exe") - sappfpar=$(find $path -name sappfpar | head -1) - sapcontrol=$(find $path -name sapcontrol | head -1) - libpath=$(find $path -name libicuuc\*.a | head -1 | sed -e 's,/[^ /]*$,,') - id=$(LIBPATH=$LIBPATH:$libpath $sappfpar SAPSYSTEM $command) - echo -n "[$id]" - LIBPATH=$LIBPATH:$libpath $sapcontrol -nr $id -function GetProcessList - fi - done -fi - - - diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_sap_hana b/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_sap_hana deleted file mode 100755 index 179c47f5..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_sap_hana +++ /dev/null @@ -1,143 +0,0 @@ -#!/bin/bash -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2017 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. 
check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - - -# .--config--------------------------------------------------------------. -# | __ _ | -# | ___ ___ _ __ / _(_) __ _ | -# | / __/ _ \| '_ \| |_| |/ _` | | -# | | (_| (_) | | | | _| | (_| | | -# | \___\___/|_| |_|_| |_|\__, | | -# | |___/ | -# '----------------------------------------------------------------------' - - -# Config might include -# DB_USER_hpy=foo -# DB_PASSWORD_hpy=bar -if [ -f $MK_CONFDIR/sap_hana.cfg ]; -then - . $MK_CONFDIR/sap_hana.cfg -fi - - -LSSAP=/usr/sap/hostctrl/exe/lssap -if [[ ! -x $LSSAP ]]; then - exit 1 -fi - - -#. -# .--queries-------------------------------------------------------------. -# | _ | -# | __ _ _ _ ___ _ __(_) ___ ___ | -# | / _` | | | |/ _ \ '__| |/ _ \/ __| | -# | | (_| | |_| | __/ | | | __/\__ \ | -# | \__, |\__,_|\___|_| |_|\___||___/ | -# | |_| | -# '----------------------------------------------------------------------' - - -# Just do: su - sidadm -c "$CMD" -function process_list () { - echo "<<>>" - su - $1 -c "sapcontrol -nr $2 -function GetProcessList" -} - - -function hdb_version () { - echo "<<>>" - su - $1 -c "HDB version" -} - - -# Do: sudo -i -u sidadm hdbsql -n localhost -i 65 -u -p '' -j -a -x -function full_backup () { - local sidadm=$1 - local inst_nr=$2 - local db_user=$3 - local db_password="$4" - echo "<<>>" - sudo -i -u ${sidadm} hdbsql -n localhost -i ${inst_nr} -u ${db_user} -p "${db_password}" -j -a -x "SELECT TO_DATE(SYS_END_TIME), days_between(SYS_END_TIME,CURRENT_TIMESTAMP) ,seconds_between(SYS_START_TIME,SYS_END_TIME), STATE_NAME,COMMENT FROM M_BACKUP_CATALOG WHERE ENTRY_TYPE_NAME = 'complete data backup' AND SYS_START_TIME = (SELECT MAX(SYS_START_TIME) FROM M_BACKUP_CATALOG WHERE ENTRY_TYPE_NAME = 'complete data backup');" -} - - -function sap_hana_mem () { - local sidadm=$1 - local inst_nr=$2 - local db_user=$3 - local db_password="$4" - echo "<<>>" - echo "[[[resident]]]" - sudo -i -u ${sidadm} hdbsql -n localhost -i ${inst_nr} -u ${db_user} -p "${db_password}" -j -a -x "SELECT HOST, ROUND(SUM(PHYSICAL_MEMORY_SIZE/1024/1024/1024),2) FROM M_SERVICE_MEMORY GROUP BY HOST;" - echo "[[[database]]]" - sudo -i -u ${sidadm} hdbsql -n localhost -i ${inst_nr} -u ${db_user} -p "${db_password}" -j -a -x "select HOST, round(INSTANCE_TOTAL_MEMORY_ALLOCATED_SIZE/(1024*1024*1024), 2), round(ALLOCATION_LIMIT/(1024*1024*1024), 2), ROUND((USED_PHYSICAL_MEMORY + FREE_PHYSICAL_MEMORY)/1024/1024/1024,2) from M_HOST_RESOURCE_UTILIZATION;" -} - - - -function sap_hana_filesystem () { - local sidadm=$1 - local inst_nr=$2 - local db_user=$3 - local db_password="$4" - echo "<<>>" - sudo -i -u ${sidadm} hdbsql -n localhost -i ${inst_nr} -u ${db_user} -p "${db_password}" -j -a -x "select D1.HOST, D1.USAGE_TYPE, ROUND(D2.USED_SIZE/1024/1024/1024,2),ROUND(D1.TOTAL_SIZE/1024/1024/1024,2) FROM M_DISKS AS D1 INNER JOIN M_DISK_USAGE AS D2 ON D1.USAGE_TYPE = D2.USAGE_TYPE;" -} - - -#. -# .--main----------------------------------------------------------------. 
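All of the query helpers above (full_backup, sap_hana_mem, sap_hana_filesystem) share the hdbsql invocation documented in the comment before them. A minimal sketch with hypothetical values for the sidadm user, instance number and monitoring credentials; the SQL is the same statement sap_hana_mem issues:

#!/bin/bash
# Sketch only: hxeadm / 00 / MONITORING / secret are hypothetical stand-ins,
# not values the plugin or the role ships.
sidadm="hxeadm"
inst_nr="00"
db_user="MONITORING"
db_password="secret"
sudo -i -u "$sidadm" hdbsql -n localhost -i "$inst_nr" \
    -u "$db_user" -p "$db_password" -j -a -x \
    "SELECT HOST, ROUND(SUM(PHYSICAL_MEMORY_SIZE/1024/1024/1024),2) FROM M_SERVICE_MEMORY GROUP BY HOST;"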
-# | _ | -# | _ __ ___ __ _(_)_ __ | -# | | '_ ` _ \ / _` | | '_ \ | -# | | | | | | | (_| | | | | | | -# | |_| |_| |_|\__,_|_|_| |_| | -# | | -# '----------------------------------------------------------------------' - - -echo "<<>>" -$LSSAP - -for line in $($LSSAP | awk -F"|" '{if ($0 ~/\//) print tolower($1)"adm|"$2}' | sed "s/\s*//g") -do - sidadm=$(echo "$line" | cut -d"|" -f1) - sidadm_short=$(echo "$sidadm" | sed "s/adm$//") - inst_nr=$(echo "$line" | cut -d"|" -f2) - process_list $sidadm $inst_nr - hdb_version $sidadm - - DB_USER=DB_USER_$sidadm_short - DB_USER=${!DB_USER} - DB_PASSWORD=DB_PASSWORD_$sidadm_short - DB_PASSWORD=${!DB_PASSWORD} - if [ -n "$DB_USER" ] && [ -n "$DB_PASSWORD" ]; - then - full_backup $sidadm $inst_nr ${DB_USER} ${DB_PASSWORD} - sap_hana_mem $sidadm $inst_nr ${DB_USER} ${DB_PASSWORD} - sap_hana_filesystem $sidadm $inst_nr ${DB_USER} ${DB_PASSWORD} - fi -done diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_saprouter b/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_saprouter deleted file mode 100755 index 0751ac1f..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_saprouter +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash -# -*- encoding: utf-8; py-indent-offset: 4 -*- -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2016 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - - -# Plugin for SAP router - - -. $MK_CONFDIR/saprouter.cfg || exit 1 - - -if [ ! -z "$SAPROUTER_USER" -a ! -z "$SAPGENPSE_PATH" ] -then - if type ${SAPGENPSE_PATH}/sapgenpse > /dev/null - then - echo "<<>>" - su - $SAPROUTER_USER -c "${SAPGENPSE_PATH}/sapgenpse get_my_name -n validity 2>&1" - fi -fi diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_scaleio b/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_scaleio deleted file mode 100755 index 7f0b8b7a..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_scaleio +++ /dev/null @@ -1,88 +0,0 @@ -#!/bin/bash -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2017 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. 
-# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -# Plugin for EMCs ScaleIO software. This plugins needs to be installed -# on all MDM servers as the information is provdided only by an active -# MDM member. Also it is recommended to configure a cluster host with -# these members as nodes in Check_MK and apply the ScaleIO services to -# this cluster host. - -# This plugin needs a configuration file in your MK_CONFDIR. This should -# be /etc/check_mk in most cases. An example of the mk_scaleio.cfg is -# the following: -# -# SIO_USER=myUser -# SIO_PASSWORD=myPassword - - -if [ ! "$MK_CONFDIR" ]; then - echo "MK_CONFDIR not set!" >&2 - exit 1 -fi - -. $MK_CONFDIR/mk_scaleio.cfg || exit 1 - -if [ -z "$SIO_USER" -o -z "$SIO_PASSWORD" ]; then - echo "Please set SIO_USER and SIO_PASSWORD in $MK_CONFDIR/mk_scalio.cfg" >&2 - exit 1 -fi - -if type scli > /dev/null -then - scli --login --username $SIO_USER --password $SIO_PASSWORD >/dev/null 2>&1 - - if [ $? == 1 ]; then - # Login fails if MDM server is not master. In this case we do - # absolutely nothing but quit this script! The data will be - # fetched elsewhere and we do not want to confuse Check_MK. - exit 1 - fi - - # System - echo '<<>>' - scli --query_properties --object_type SYSTEM --all_objects --properties ID,NAME,CAPACITY_ALERT_HIGH_THRESHOLD,CAPACITY_ALERT_CRITICAL_THRESHOLD,MAX_CAPACITY_IN_KB,UNUSED_CAPACITY_IN_KB - - # MDM - echo '<<>>' - scli --query_cluster - - # SDS - echo '<<>>' - scli --query_properties --object_type SDS --all_objects --properties ID,NAME,PROTECTION_DOMAIN_ID,STATE,MEMBERSHIP_STATE,MDM_CONNECTION_STATE,MAINTENANCE_MODE_STATE,MAX_CAPACITY_IN_KB,UNUSED_CAPACITY_IN_KB, - - # Volume - echo '<<>>' - scli --query_properties --object_type VOLUME --all_objects --properties ID,NAME,SIZE,USER_DATA_READ_BWC,USER_DATA_WRITE_BWC - - # Protection Domain - echo '<<>>' - scli --query_properties --object_type PROTECTION_DOMAIN --all_objects --properties ID,NAME,STATE,MAX_CAPACITY_IN_KB,UNUSED_CAPACITY_IN_KB - - # Storage Pool - echo '<<>>' - scli --query_properties --object_type STORAGE_POOL --all_objects --properties ID,NAME,MAX_CAPACITY_IN_KB,UNUSED_CAPACITY_IN_KB,FAILED_CAPACITY_IN_KB,TOTAL_READ_BWC,TOTAL_WRITE_BWC,REBALANCE_READ_BWC,REBALANCE_WRITE_BWC, - - scli --logout >/dev/null 2>&1 -fi diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_site_object_counts b/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_site_object_counts deleted file mode 100755 index 8241a69f..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_site_object_counts +++ /dev/null @@ -1,140 +0,0 @@ -#!/bin/bash -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . 
\ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2017 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - - -# .--queries-------------------------------------------------------------. -# | _ | -# | __ _ _ _ ___ _ __(_) ___ ___ | -# | / _` | | | |/ _ \ '__| |/ _ \/ __| | -# | | (_| | |_| | __/ | | | __/\__ \ | -# | \__, |\__,_|\___|_| |_|\___||___/ | -# | |_| | -# '----------------------------------------------------------------------' - - -function get_tag_stats () { - local socket="$1" - local site="$2" - local header="$3" - local tags=($(echo "$header" | tr ' ' '\n')) - if [ "${#tags[@]}" -ne 0 ] && [ -S "$socket" ]; - then - local tags_query="GET hosts\n" - for tag in "${tags[@]}"; - do - tags_query="${tags_query}Stats: custom_variables ~ TAGS ${tag}\n" - done - echo "Tags|$header|$(echo -e "${tags_query}" | waitmax 3 "/omd/sites/${site}/bin/unixcat" "${socket}")" - fi -} - - -function get_check_command_stats () { - local socket="$1" - local site="$2" - local header="$3" - local service_check_commands=($(echo "$header" | tr ' ' '\n')) - if [ "${#service_check_commands[@]}" -ne 0 ] && [ -S "$socket" ]; - then - local service_check_commands_query="GET services\n" - for service_check_command in "${service_check_commands[@]}"; - do - service_check_commands_query="${service_check_commands_query}Stats: check_command ~~ ${service_check_command}$\n" - done - echo "Service check commands|$header|$(echo -e "${service_check_commands_query}" | waitmax 3 "/omd/sites/${site}/bin/unixcat" "${socket}")" - fi -} - - -#. -# .--main----------------------------------------------------------------. -# | _ | -# | _ __ ___ __ _(_)_ __ | -# | | '_ ` _ \ / _` | | '_ \ | -# | | | | | | | (_| | | | | | | -# | |_| |_| |_|\__,_|_|_| |_| | -# | | -# '----------------------------------------------------------------------' - -SITES= -TAGS= -SERVICE_CHECK_COMMANDS= - - -if [ -e "$MK_CONFDIR/site_object_counts.cfg" ]; -then - . 
"$MK_CONFDIR/site_object_counts.cfg" -fi - - -if type omd >/dev/null; -then - echo "<<>>" - if [ -n "$SITES" ]; - then - sites=($(echo "$SITES" | tr ' ' '\n')) - else - sites=($(omd sites | cut -d' ' -f1)) - fi - - if [ -n "$TAGS" ]; - then - tags="$TAGS" - fi - - if [ -n "$SERVICE_CHECK_COMMANDS" ]; - then - service_check_commands="$SERVICE_CHECK_COMMANDS" - fi - - for site in "${sites[@]}"; - do - site_tags="TAGS_$site" - site_tags=${!site_tags} - if [ -n "$tags" ] && [ -n "$site_tags" ]; - then - site_tags="$tags $site_tags" - elif [ -n "$tags" ]; - then - site_tags="$tags" - fi - - site_service_check_commands="SERVICE_CHECK_COMMANDS_$site" - site_service_check_commands=${!site_service_check_commands} - if [ -n "$service_check_commands" ] && [ -n "$site_service_check_commands" ]; - then - site_service_check_commands="$service_check_commands $site_service_check_commands" - elif [ -n "$service_check_commands" ]; - then - site_service_check_commands="$service_check_commands" - fi - - socket="/omd/sites/${site}/tmp/run/live" - - echo "[[[$site]]]" - get_tag_stats "$socket" "$site" "$site_tags" - get_check_command_stats "$socket" "$site" "$site_service_check_commands" - done -fi diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_sshd_config b/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_sshd_config deleted file mode 100755 index c167ccad..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_sshd_config +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -SSHD_CONFIG=/etc/ssh/sshd_config - -if [ -f $SSHD_CONFIG ] ; then - echo "<<>>" - sed -e '/^#/d' -e '/^\s*$/d' -e 's/\s\+/ /g' $SSHD_CONFIG -fi diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_suseconnect b/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_suseconnect deleted file mode 100755 index 17e1794b..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_suseconnect +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . 
\ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2017 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - - -if type SUSEConnect > /dev/null ; then - echo '<<>>' - SUSEConnect --status-text | sed -e '/^-/ d' -fi diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_tinkerforge b/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_tinkerforge deleted file mode 100755 index 6caffc08..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_tinkerforge +++ /dev/null @@ -1,332 +0,0 @@ -#!/usr/bin/python -# -*- encoding: utf-8; py-indent-offset: 4 -*- -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2016 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - - -################################################### -# plugin to retrieve data from tinkerforge devices. -# -# please note that for this plugin to work, the tinkerforge api has to be installed -# (included in OMD, otherwise get it from http://download.tinkerforge.com/bindings/python/) -# Also, if the tinkerforge device is connected directly to the computer via usb, -# the brick deamon has to be installed and running: http://download.tinkerforge.com/tools/brickd/) -# -# This has been designed to also work as a special agent. 
In this case the following configuration -# settings have to be provided on the command line - - -####################################################### -# sample configuration (/etc/check_mk/tinkerforge.cfg): -# -# host = "localhost" -# port = 4223 -# segment_display_uid = "abc" # uid of the sensor to display on the 7-segment display -# segment_display_brightness = 2 # brightness of the 7-segment display (0-7) -# -# to find the uid of a sensor, either use brickv or run the plugin -# manually. plugin output looks like this: -# temperature,Ab3d5F.a.xyz,2475 -# xyz is the uid you're looking for. It's always the last of the dot-separated sensor path -# (Ab3d5F is the id of the master brick to which the sensor is connected, a is the port -# to which the sensor is connected) - - -################## -# developer notes: -# -# Support for individual bricklets has to be added in init_device_handlers. -# Currently the bricklets included in the Starter Kit: Server Room Monitoring are -# implemented - -# Don't have tinkerforge module during tests. So disable those checks -# pylint: disable=import-error - -import sys -import os - - -def install(): - dest = os.path.dirname(os.path.realpath(__file__)) - sys.stdout.write("installing tinkerforge python api to %s\n" % dest) - if os.path.exists(os.path.join(dest, "tinkerforge")): - sys.stdout.write("already installed\n") - return 1 - - import urllib2 - import shutil - from zipfile import ZipFile - from cStringIO import StringIO - url = "http://download.tinkerforge.com/bindings/python/tinkerforge_python_bindings_latest.zip" - response = urllib2.urlopen(url) - buf = StringIO(response.read()) - z = ZipFile(buf) - - extract_files = [f for f in z.namelist() if f.startswith("source/tinkerforge")] - z.extractall(dest, extract_files) - - shutil.move(os.path.join(dest, "source", "tinkerforge"), - os.path.join(dest, "tinkerforge")) - shutil.rmtree(os.path.join(dest, "source")) - - return 0 - - -DEFAULT_SETTINGS = { - 'host': "localhost", - 'port': 4223, - 'segment_display_uid': None, - 'segment_display_brightness': 2 -} - - -from optparse import OptionParser -import time - -# globals -segment_display_value = None -segment_display_unit = "" -segment_display = None - - -def id_to_string(identifier): - return "%s.%s.%s" % (identifier.connected_uid, - identifier.position, - identifier.uid) - - -def print_generic(settings, sensor_type, ident, factor, unit, *values): - if ident.uid == settings['segment_display_uid']: - global segment_display_value, segment_display_unit - segment_display_value = int(values[0] * factor) - segment_display_unit = unit - sys.stdout.write("%s,%s,%s\n" % - (sensor_type, id_to_string(ident), ",".join([str(val) for val in values]))) - - -def print_ambient_light(conn, settings, uid): - from tinkerforge.bricklet_ambient_light import BrickletAmbientLight - br = BrickletAmbientLight(uid, conn) - print_generic(settings, "ambient", br.get_identity(), 0.01, "L", br.get_illuminance()) - - -def print_ambient_light_v2(conn, settings, uid): - from tinkerforge.bricklet_ambient_light_v2 import BrickletAmbientLightV2 - br = BrickletAmbientLightV2(uid, conn) - print_generic(settings, "ambient", br.get_identity(), 0.01, "L", br.get_illuminance()) - - -def print_temperature(conn, settings, uid): - from tinkerforge.bricklet_temperature import BrickletTemperature - br = BrickletTemperature(uid, conn) - print_generic(settings, "temperature", br.get_identity(), 0.01, - u"\N{DEGREE SIGN}C", br.get_temperature()) - - -def print_temperature_ext(conn, settings, uid): - 
from tinkerforge.bricklet_ptc import BrickletPTC - br = BrickletPTC(uid, conn) - print_generic(settings, "temperature.ext", br.get_identity(), 0.01, - u"\N{DEGREE SIGN}C", br.get_temperature()) - - -def print_humidity(conn, settings, uid): - from tinkerforge.bricklet_humidity import BrickletHumidity - br = BrickletHumidity(uid, conn) - print_generic(settings, "humidity", br.get_identity(), 0.1, "RH", br.get_humidity()) - - -def print_master(conn, settings, uid): - from tinkerforge.brick_master import BrickMaster - br = BrickMaster(uid, conn) - print_generic(settings, "master", br.get_identity(), 1.0, "", - br.get_stack_voltage(), - br.get_stack_current(), - br.get_chip_temperature(), - ) - -def print_motion_detector(conn, settings, uid): - from tinkerforge.bricklet_motion_detector import BrickletMotionDetector - br = BrickletMotionDetector(uid, conn) - print_generic(settings, "motion", br.get_identity(), 1.0, "", br.get_motion_detected()) - - -def display_on_segment(conn, settings, text): - # 0x01 - # ______ - # | | - # 0x20 | | 0x02 - # |______| - # | 0x40 | - # 0x10 | | 0x04 - # |______| - # 0x08 - - CHARACTERS = { - "0" : 0x3f, "1" : 0x06, "2" : 0x5b, "3" : 0x4f, "4" : 0x66, - "5" : 0x6d, "6" : 0x7d, "7" : 0x07, "8" : 0x7f, "9" : 0x6f, - "C" : 0x39, "H" : 0x74, "L" : 0x38, "R" : 0x50, - u"\N{DEGREE SIGN}" : 0x63, - } - - from tinkerforge.bricklet_segment_display_4x7 import BrickletSegmentDisplay4x7 - br = BrickletSegmentDisplay4x7(segment_display, conn) - segments = [] - for letter in text: - if len(segments) >= 4: - break - if letter in CHARACTERS: - segments.append(CHARACTERS[letter]) - - # align to the right - segments = [0] * (4 - len(segments)) + segments - - br.set_segments(segments, settings['segment_display_brightness'], False) - - -def init_device_handlers(): - device_handlers = {} - - # storing the dev_id is not necessary but may save a little time as otherwise the module - # needs to be imported just to find out this id. If the bricklet is present the module - # gets imported anyway of course - for dev_id, module_name, clazz, handler in [ - (13, "brick_master", "BrickMaster", print_master), - (21, "bricklet_ambient_light", "BrickletAmbientLight", print_ambient_light), - (259, "bricklet_ambient_light_v2", "BrickletAmbientLightV2", print_ambient_light_v2), - (216, "bricklet_temperature", "BrickletTemperature", print_temperature), - (226, "bricklet_ptc", "BrickletPTC", print_temperature_ext), - (27, "bricklet_humidity", "BrickletHumidity", print_humidity), - (233, "bricklet_motion_detector", "BrickletMotionDetector", print_motion_detector) - ]: - if dev_id is not None: - device_handlers[dev_id] = handler - else: - module = __import__("tinkerforge." 
+ module_name) - sub_module = module.__dict__[module_name] - device_handlers[sub_module.__dict__[clazz].DEVICE_IDENTIFIER] = handler - - return device_handlers - - -def enumerate_callback(conn, device_handlers, settings, - uid, connected_uid, position, hardware_version, - firmware_version, device_identifier, enumeration_type): - if device_identifier == 237: - global segment_display - segment_display = uid - elif device_identifier in device_handlers: - device_handlers[device_identifier](conn, settings, uid) - - -def read_config(env): - settings = DEFAULT_SETTINGS - cfg_path = os.path.join(os.getenv("MK_CONFDIR", "/etc/check_mk"), - "tinkerforge.cfg") - - if os.path.isfile(cfg_path): - execfile(cfg_path, settings, settings) - return settings - - -def main(): - -# host = "localhost" -# port = 4223 -# segment_display_uid = "abc" # uid of the sensor to display on the 7-segment display -# segment_display_brightness = 2 # brightness of the 7-segment display (0-7) - - - settings = read_config(os.environ) - parser = OptionParser() - parser.add_option("--host", dest="host", default=settings['host'], - help="host/ipaddress of the tinkerforge device", - metavar="ADDRESS") - parser.add_option("--port", dest="port", default=settings['port'], type=int, - help="port of the tinkerforge device", metavar="PORT") - parser.add_option("--segment_display_uid", - dest="uid", default=settings['segment_display_uid'], - help="uid of the bricklet which will be displayed in the 7-segment display", - metavar="UID") - parser.add_option("--segment_display_brightness", type=int, - dest="brightness", default=settings['segment_display_brightness'], - help="brightness of the 7-segment display (0-7)") - parser.add_option("--install", action="store_true", - help="install tinkerforge python api to same directory as the plugin") - - options = parser.parse_args()[0] - - settings = { - 'host': options.host, - 'port': options.port, - 'segment_display_uid': options.uid, - 'segment_display_brightness': options.brightness - } - - if options.install: - return install() - - try: - from tinkerforge.ip_connection import IPConnection - except ImportError: - sys.stdout.write("<<>>\n") - sys.stdout.write("master,0.0.0,tinkerforge api isn't installed\n") - return 1 - - conn = IPConnection() - conn.connect(settings['host'], settings['port']) - - device_handlers = init_device_handlers() - - try: - sys.stdout.write("<<>>\n") - - cb = lambda uid, connected_uid, position, hardware_version, firmware_version, \ - device_identifier, enumeration_type: \ - enumerate_callback(conn, device_handlers, settings, \ - uid, connected_uid, position, \ - hardware_version, firmware_version, \ - device_identifier, enumeration_type) - - conn.register_callback(IPConnection.CALLBACK_ENUMERATE, cb) - conn.enumerate() - - # bricklets respond asynchronously in callbacks and we have no way of knowing - # what bricklets to expect - time.sleep(0.1) - - if segment_display is not None: - if segment_display_value is not None: - display_on_segment(conn, settings, - "%d%s" % (segment_display_value, segment_display_unit)) - else: - display_on_segment(conn, settings, - "") - finally: - conn.disconnect() - - -if __name__ == "__main__": - main() - diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_tsm b/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_tsm deleted file mode 100755 index d09de95a..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/mk_tsm +++ /dev/null @@ -1,82 +0,0 @@ -#!/usr/bin/ksh -# 
+------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -# Agent for Linux/UNIX for Tivoli Storage Manager (TSM) - -# Configuration is needed for username and password for dsmadmc -# You need to create a configuration file /etc/check_mk/tsm.cfg -# with the following two lines: -# TSM_USER=foo -# TSM_PASSWORD=bar -# If you have more than once instance, make sure that the upper -# login works on all of them. - -. $MK_CONFDIR/tsm.cfg || exit 1 - -if [ -z "$TSM_USER" -o -z "$TSM_PASSWORD" ] -then - echo "Please set TSM_USER and TSM_PASSWORD in $MK_CONFDIR/tsm.cfg" >&2 - exit 1 -fi - -do_tsm_checks () -{ - INST=${DSMSERV_DIR##*/} - - # If we have no instance name, we take 'default' - if [ -z "$INST" ] ; then INST=default ; fi - - dsmcmd="dsmadmc -id=$TSM_USER -pass=$TSM_PASSWORD -dataonly=yes -tab" - - # Staging Pools - echo '<<>>' - $dsmcmd <'DISK' -EOF - - # Drive Status - echo '<<>>' - $dsmcmd <>>' - $dsmcmd < /dev/null ; then - echo '<<>>' - if grep -q '^VERSION = 10' < /etc/SuSE-release - then - ZYPPER='waitmax 10 zypper --no-gpg-checks --non-interactive --terse' - REFRESH=`$ZYPPER refresh 2>&1` - if [ "$REFRESH" ] - then - echo "ERROR: $REFRESH" - else - { $ZYPPER pchk || [ $? = 100 -o $? = 101 ] && $ZYPPER lu ; } \ - | egrep '(patches needed|\|)' | egrep -v '^(#|Repository |Catalog )' - fi - else - ZYPPER='waitmax 10 zypper --no-gpg-checks --non-interactive --quiet' - REFRESH=`$ZYPPER refresh 2>&1` - if [ "$REFRESH" ] - then - echo "ERROR: $REFRESH" - else - { { $ZYPPER pchk || [ $? = 100 -o $? = 101 ] && $ZYPPER lp ; } ; $ZYPPER ll ; } \ - | egrep '(patches needed|\|)' | egrep -v '^(#|Repository)' - fi - fi -fi diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/mtr b/ansible/roles/elnappo.check_mk_agent/files/plugins/mtr deleted file mode 100755 index 2ac9cf64..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/mtr +++ /dev/null @@ -1,384 +0,0 @@ -#!/usr/bin/python -# -*- encoding: utf-8; py-indent-offset: 4 -*- -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . 
\ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2016 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -# This plugin was sponsored by BenV. Thanks! -# https://notes.benv.junerules.com/mtr/ - -# Concept: -# Read config mtr.cfg -# For every host: -# parse outstanding reports (and delete them) -# If current time > last check + config(time)//300 start new mtr in background -# MTR results are stored in $VARDIR/mtr_${host}.report -# return previous host data - -import sys, os, re, time, glob, ConfigParser, StringIO -from unicodedata import normalize -import subprocess - -mk_confdir = os.getenv("MK_CONFDIR") or "/etc/check_mk" -mk_vardir = os.getenv("MK_VARDIR") or "/var/lib/check_mk_agent" - -config_filename = mk_confdir + "/mtr.cfg" -config_dir = mk_confdir + "/mtr.d/*.cfg" -status_filename = mk_vardir + "/mtr.state" -report_filepre = mk_vardir + "/mtr.report." - -if '-d' in sys.argv[2:] or '--debug' in sys.argv[1:]: - debug = True -else: - debug = False - - -def which(program): - def is_exe(fpath): - return os.path.isfile(fpath) and os.access(fpath, os.X_OK) - - fpath, fname = os.path.split(program) - if fpath: - if is_exe(program): - return program - else: - for path in os.environ["PATH"].split(os.pathsep): - exe_file = os.path.join(path, program) - if is_exe(exe_file): - return exe_file - - return None - - -# See if we have mtr -mtr_prog = which('mtr') -if mtr_prog == None: - if debug: - sys.stdout.write("Could not find mtr binary\n") - sys.exit(0) - - -def read_config(): - default_options = { - 'type' : 'icmp', - 'count' : "10", - 'force_ipv4': "0", - 'force_ipv6': "0", - 'size' : "64", - 'time' : "0", - 'dns' : "0", - 'port' : None, - 'address' : None, - 'interval' : None, - 'timeout' : None - } - if not os.path.exists(config_filename): - if debug: - sys.stdout.write("Not configured, %s missing\n" % config_filename) - sys.exit(0) - - config = ConfigParser.SafeConfigParser(default_options) - # Let ConfigParser figure it out - for config_file in ( [ config_filename ] + glob.glob(config_dir)): - try: - if not config.read(config_file): - sys.stdout.write("**ERROR** Failed to parse configuration file %s!\n" % config_file) - except Exception as e: - sys.stdout.write("**ERROR** Failed to parse config file %s: %s\n" % (config_file, repr(e))) - - if len(config.sections()) == 0: - sys.stdout.write("**ERROR** Configuration defines no hosts!\n") - sys.exit(0) - - return config - -# structure of statusfile -# # HOST |LASTTIME |HOPCOUNT|HOP1|Loss%|Snt|Last|Avg|Best|Wrst|StDev|HOP2|...|HOP8|...|StdDev -# www.google.com|145122481|8|192.168.1.1|0.0%|10|32.6|3.6|0.3|32.6|10.2|192.168.0.1|...|9.8 -def read_status(): - status = {} - if not 
os.path.exists(status_filename): - return status - - for line in file(status_filename): - try: - parts = line.split('|') - if len(parts) < 2: - sys.stdout.write("**ERROR** (BUG) Status has less than 2 parts:\n") - sys.stdout.write("%s\n" % parts) - continue - host = parts[0] - lasttime = int(float(parts[1])) - status[host] = {'hops': {}, 'lasttime': lasttime}; - hops = int(parts[2]) - for i in range(0, hops): - status[host]["hops"][i+1] = { - 'hopname': parts[i*8 + 3].rstrip(), - 'loss' : parts[i*8 + 4].rstrip(), - 'snt' : parts[i*8 + 5].rstrip(), - 'last' : parts[i*8 + 6].rstrip(), - 'avg' : parts[i*8 + 7].rstrip(), - 'best' : parts[i*8 + 8].rstrip(), - 'wrst' : parts[i*8 + 9].rstrip(), - 'stddev' : parts[i*8 + 10].rstrip(), - } - except Exception as e: - sys.stdout.write("*ERROR** (BUG) Could not parse status line: %s, reason: %s\n" % (line, repr(e))) - return status - -def save_status(status): - f = file(status_filename, "w") - for host, hostdict in status.items(): - hopnum = len(hostdict["hops"].keys()) - lastreport = hostdict["lasttime"] - hoststring = "%s|%s|%s" % (host, lastreport, hopnum) - for hop in hostdict["hops"].keys(): - hi = hostdict["hops"][hop] - hoststring += '|%s|%s|%s|%s|%s|%s|%s|%s' % (hi['hopname'], hi['loss'], hi['snt'], hi['last'], hi['avg'], hi['best'], hi['wrst'], hi['stddev']) - hoststring = hoststring.rstrip() - f.write("%s\n" % hoststring) - -_punct_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.:]+') -def host_to_filename(host, delim=u'-'): - # Get rid of gibberish chars, stolen from Django - """Generates an slightly worse ASCII-only slug.""" - host=unicode(host, 'UTF-8') - result = [] - for word in _punct_re.split(host.lower()): - word = normalize('NFKD', word).encode('ascii', 'ignore') - if word: - result.append(word) - return unicode(delim.join(result)) - -def check_mtr_pid(pid): - """ Check for the existence of a unix pid and if the process matches. """ - try: - os.kill(pid, 0) - except OSError: - return False # process does no longer exist - else: - pid_cmdline = "/proc/%d/cmdline" % pid - try: - if os.path.exists(pid_cmdline): - if file(pid_cmdline).read().startswith("mtr\x00--report\x00--report-wide"): - return True - else: - return False # different process than mtr - else: - return False # cmdline no longer exists, race condition.. - except: - return False # any error - -def parse_report(host): - reportfile = report_filepre + host_to_filename(host) - if not os.path.exists(reportfile): - if not host in status.keys(): - # New host - status[host] = {'hops':{}, 'lasttime': 0} - return -# 1451228358 -# Start: Sun Dec 27 14:35:18 2015 -#HOST: purple Loss% Snt Last Avg Best Wrst StDev -# 1.|-- 80.69.76.120 0.0% 10 0.3 0.4 0.3 0.6 0.0 -# 2.|-- 80.249.209.100 0.0% 10 1.0 1.1 0.8 1.4 0.0 -# 3.|-- 209.85.240.63 0.0% 10 1.3 1.7 1.1 3.6 0.5 -# 4.|-- 209.85.253.242 0.0% 10 1.6 1.8 1.6 2.1 0.0 -# 5.|-- 209.85.253.201 0.0% 10 4.8 5.0 4.8 5.4 0.0 -# 6.|-- 216.239.56.6 0.0% 10 4.7 5.1 4.7 5.5 0.0 -# 7.|-- ??? 100.0 10 0.0 0.0 0.0 0.0 0.0 -# 8.|-- 74.125.136.147 0.0% 10 4.5 4.6 4.3 5.2 0.0 - # See if pidfile exists and if mtr is still running - if os.path.exists(reportfile + ".pid"): - # See if it's running - try: - pid = int(file(reportfile + ".pid", 'r').readline().rstrip()) - if check_mtr_pid(pid): - # Still running, we're done. - if not host in status.keys(): - # New host - status[host] = {'hops':{}, 'lasttime': 0} - status[host]['running'] = True - return - except ValueError: - # Pid file is broken. Process probably crashed.. 
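The pipe-delimited mtr.state layout documented above (host, last report time, hop count, then eight fields per hop) is what read_status() and save_status() move between disk and the in-memory status dict. A minimal Python sketch of decoding one such line, purely to illustrate that layout (the function name is illustrative and not part of the plugin):

def decode_status_line(line):
    # e.g. www.google.com|145122481|8|192.168.1.1|0.0%|10|32.6|3.6|0.3|32.6|10.2|...
    parts = [p.rstrip() for p in line.split('|')]
    host = parts[0]
    lasttime = int(float(parts[1]))
    hops = {}
    for i in range(int(parts[2])):
        base = 3 + i * 8  # eight metrics per hop, after host|lasttime|hopcount
        keys = ('hopname', 'loss', 'snt', 'last', 'avg', 'best', 'wrst', 'stddev')
        hops[i + 1] = dict(zip(keys, parts[base:base + 8]))
    return host, lasttime, hops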
- pass - # Done running, get rid of pid file - os.unlink(reportfile + ".pid") - - # Parse the existing report - lines = file(reportfile).readlines() - if len(lines) < 3: - sys.stdout.write("**ERROR** Report file %s has less than 3 lines, " - "expecting at least 1 hop! Throwing away invalid report\n" % reportfile) - os.unlink(reportfile) - if not host in status.keys(): - # New host - status[host] = {'hops':{}, 'lasttime': 0} - return - status[host] = {'hops':{}, 'lasttime': 0} - - hopcount = 0 - status[host]["lasttime"] = int(float(lines.pop(0))) - while len(lines) > 0 and not lines[0].startswith("HOST:"): - lines.pop(0) - if len(lines) < 2: # Not enough lines - return - try: - lines.pop(0) # Get rid of HOST: header - hopline = re.compile('^\s*\d+\.') # 10.|-- 129.250.2.147 0.0% 10 325.6 315.5 310.3 325.6 5.0 - for line in lines: - if not hopline.match(line): - continue; # | `|-- 129.250.2.159 - hopcount += 1 - parts = line.split() - if len(parts) < 8: - sys.stdout.write("**ERROR** Bug parsing host/hop, " - "line has less than 8 parts: %s\n" % line) - continue; - status[host]['hops'][hopcount] = { - 'hopname': parts[1], - 'loss' : parts[2], - 'snt' : parts[3], - 'last' : parts[4], - 'avg' : parts[5], - 'best' : parts[6], - 'wrst' : parts[7], - 'stddev' : parts[8], - } - except Exception, e: - sys.stdout.write("**ERROR** Could not parse report file %s, " - "tossing away invalid data %s\n" % (reportfile, e)) - del status[host] - os.unlink(reportfile) - -def output_report(host): - hostdict = status.get(host) - if not hostdict: - return - - hopnum = len(hostdict["hops"].keys()) - lastreport = hostdict["lasttime"] - hoststring = "%s|%s|%s" % (host, lastreport, hopnum) - for hop in hostdict["hops"].keys(): - hi = hostdict["hops"][hop] - hoststring += '|%s|%s|%s|%s|%s|%s|%s|%s' % (hi['hopname'], hi['loss'], hi['snt'], hi['last'], hi['avg'], hi['best'], hi['wrst'], hi['stddev']) - sys.stdout.write("%s\n" % hoststring) - -def start_mtr(host): - options = [mtr_prog, '--report', '--report-wide'] - pingtype = config.get(host, "type") - count = config.getint(host, "count") - ipv4 = config.getboolean(host, "force_ipv4") - ipv6 = config.getboolean(host, "force_ipv6") - size = config.getint(host, "size") - lasttime = config.getint(host, "time") - dns = config.getboolean(host, "dns") - port = config.get(host, "port") - address = config.get(host, "address") - interval = config.get(host, "interval") - timeout = config.get(host, "timeout") - - if "running" in status[host].keys(): - if debug: - sys.stdout.write("MTR for host still running, not restarting MTR!\n") - return - - if time.time() - status[host]["lasttime"] < lasttime: - if debug: - sys.stdout.write("%s - %s = %s is smaller than %s => mtr run not needed yet.\n" % - (time.time(), status[host]["lasttime"], time.time() - status[host]["lasttime"], lasttime)) - return - - - pid = os.fork() - if pid > 0: - # Parent process, return and keep running - return - - os.chdir("/") - os.umask(0) - os.setsid() - - # Close all fd except stdin,out,err - for fd in range(3, 256): - try: - os.close(fd) - except OSError: - pass - - if pingtype == 'tcp': - options.append("--tcp") - if pingtype == 'udp': - options.append("--udp") - if not port == None: - options.append("--port") - options.append(str(port)) - if ipv4 == True: - options.append("-4") - if ipv6 == True: - options.append("-6") - options.append("-s") - options.append(str(size)) - options.append("-c") - options.append(str(count)) - if dns == False: - options.append("--no-dns") - if not address == None: - 
options.append("--address") - options.append(str(address)) - if not interval == None: - options.append("-i") - options.append(str(interval)) - if not timeout == None: - options.append("--timeout") - options.append(str(timeout)) - - options.append(str(host)) - if debug: - sys.stdout.write("Startin MTR: %s\n" % (" ".join(options))) - reportfile = report_filepre + host_to_filename(host) - if (os.path.exists(reportfile)): - os.unlink(reportfile) - report=open(reportfile, 'a+') - report.write(str(int(time.time())) + "\n") - report.flush() - process = subprocess.Popen(options, stdout=report, stderr=report) - # Write pid to report.pid - pidfile=open(reportfile + ".pid", 'w') - pidfile.write("%d\n" % process.pid) - pidfile.flush() - pidfile.close() - os._exit(os.EX_OK) - -# Parse config -sys.stdout.write("<<>>\n") -config = read_config() -status = read_status() -for host in config.sections(): - # Parse outstanding report - parse_report(host) - # Output last known values - output_report(host) - # Start new if needed - start_mtr(host) -save_status(status) diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/netstat.aix b/ansible/roles/elnappo.check_mk_agent/files/plugins/netstat.aix deleted file mode 100755 index 55b98d32..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/netstat.aix +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/sh -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -# This is not part of the standard agent since it can produce much -# output data of the table is large. This plugin is just needed for -# checking if certain known TCP connections are established. - -echo '<<>>' -netstat -n -f inet | fgrep -v '*.*' | egrep '^(tcp|udp)' diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/netstat.linux b/ansible/roles/elnappo.check_mk_agent/files/plugins/netstat.linux deleted file mode 100755 index 55a27aeb..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/netstat.linux +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/sh -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . 
\ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -# This is not part of the standard agent since it can take very -# long to run if your TCP/UDP table is large. Netstat seems to -# have an execution time complexity of at least O(n^2) on Linux. - -echo '<<>>' -netstat -ntua | egrep '^(tcp|udp)' | sed -e 's/LISTEN/LISTENING/g' diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/nfsexports b/ansible/roles/elnappo.check_mk_agent/files/plugins/nfsexports deleted file mode 100755 index 685b81f7..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/nfsexports +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -# this check will only run if we have a working nfs environment or SHOULD have one. 
-# not tested for nfs3 - -# verify if there are exports defined in your local /etc/exports -if [ -r /etc/exports ]; then - EXPORTS=$(grep -v -e ^# -e ^$ /etc/exports) -fi - -if [ "$EXPORTS" ] && pgrep '(portmap|rpcbind)' >/dev/null && pgrep rpc.mountd >/dev/null -then - echo "<<>>" - waitmax 3 showmount --no-headers -e -fi diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/nfsexports.solaris b/ansible/roles/elnappo.check_mk_agent/files/plugins/nfsexports.solaris deleted file mode 100755 index ba571a02..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/nfsexports.solaris +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/bash -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -# Check_MK agent plugin for monitoring nfsexports on Solaris. This plugin -# has been tested with solaris 10 in a standalone and cluster setting. - -clusterconfigdir="/etc/cluster/ccr/global/directory" -if [ -r $clusterconfigdir ]; then - # is a clustered nfs server - nfsconfig=/etc/cluster/ccr/global/`grep rgm $clusterconfigdir | grep nfs | grep rg_` - if [ -r $nsconfig ]; then - Pathprefix=`grep Pathprefix $nfsconfig | awk {'print $2'}`/SUNW.nfs - dfstabfile=$Pathprefix/dfstab.`grep -v FilesystemMountPoints $nfsconfig | grep SUNW.nfs | \ - awk {'print $1'} | sed -e 's/RS_//'` - if [ -r $dfstabfile ]; then - EXPORTS=`grep -v ^# $dfstabfile | grep -v ^$` - ps -aef | grep nfsd | grep $Pathprefix >/dev/null && DAEMONS="ok" - fi - fi -else - # is a standalone nfs server - dfstabfile="/etc/dfs/dfstab" - if [ -r $dfstabfile ]; then - EXPORTS=`grep -v ^# $dfstabfile | grep -v ^$` - svcs -a | grep nfs/server | grep ^online >/dev/null && DAEMONS="ok" - fi -fi - -# any exports or have running daemons? 
then look for registered exports -if [ "$EXPORTS" ]; then - echo "<<>>" - if [ "$DAEMONS" ]; then - showmount -e | grep ^/ - fi -fi diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/nginx_status b/ansible/roles/elnappo.check_mk_agent/files/plugins/nginx_status deleted file mode 100755 index 2bb9ed3c..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/nginx_status +++ /dev/null @@ -1,146 +0,0 @@ -#!/usr/bin/python -# -*- encoding: utf-8; py-indent-offset: 4 -*- -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -# Check_MK-Agent-Plugin - Nginx Server Status -# -# Fetches the stub nginx_status page from detected or configured nginx -# processes to gather status information about this process. -# -# Take a look at the check man page for details on how to configure this -# plugin and check. -# -# By default this plugin tries to detect all locally running processes -# and to monitor them. If this is not good for your environment you might -# create an nginx_status.cfg file in MK_CONFDIR and populate the servers -# list to prevent executing the detection mechanism. - -import os, sys, urllib2, re - -# tell urllib2 not to honour "http(s)_proxy" env variables -urllib2.getproxies = lambda: {} - -config_dir = os.getenv("MK_CONFDIR", "/etc/check_mk") -config_file = config_dir + "/nginx_status.cfg" - -# None or list of (proto, ipaddress, port) tuples. -# proto is 'http' or 'https' -servers = None -ssl_ports = [ 443, ] - -if os.path.exists(config_file): - execfile(config_file) - -def try_detect_servers(): - pids = [] - results = [] - for line in os.popen('netstat -tlnp 2>/dev/null').readlines(): - parts = line.split() - # Skip lines with wrong format - if len(parts) < 7 or '/' not in parts[6]: - continue - - pid, proc = parts[6].split('/', 1) - to_replace = re.compile('^.*/') - proc = to_replace.sub('', proc) - - procs = [ 'nginx', 'nginx:', 'nginx.conf' ] - # the pid/proc field length is limited to 19 chars. Thus in case of - # long PIDs, the process names are stripped of by that length. 
- # Workaround this problem here - procs = [ p[:19 - len(pid) - 1] for p in procs ] - - # Skip unwanted processes - if proc not in procs: - continue - - # Add only the first found port of a single server process - if pid in pids: - continue - pids.append(pid) - - proto = 'http' - address, port = parts[3].rsplit(':', 1) - port = int(port) - - # Use localhost when listening globally - if address == '0.0.0.0': - address = '127.0.0.1' - elif address == '::': - address = '::1' - - # Switch protocol if port is SSL port. In case you use SSL on another - # port you would have to change/extend the ssl_port list - if port in ssl_ports: - proto = 'https' - - results.append((proto, address, port)) - - return results - -if servers is None: - servers = try_detect_servers() - -if not servers: - sys.exit(0) - -sys.stdout.write('<<>>\n') -for server in servers: - if isinstance(server, tuple): - proto, address, port = server - page = 'nginx_status' - else: - proto = server['protocol'] - address = server['address'] - port = server['port'] - page = server.get('page', 'nginx_status') - - try: - url = '%s://%s:%s/%s' % (proto, address, port, page) - # Try to fetch the status page for each server - try: - request = urllib2.Request(url, headers={"Accept" : "text/plain"}) - fd = urllib2.urlopen(request) - except urllib2.URLError, e: - if 'SSL23_GET_SERVER_HELLO:unknown protocol' in str(e): - # HACK: workaround misconfigurations where port 443 is used for - # serving non ssl secured http - url = 'http://%s:%s/%s' % (address, port, page) - fd = urllib2.urlopen(url) - else: - raise - - for line in fd.read().split('\n'): - if not line.strip(): - continue - if line.lstrip()[0] == '<': - # seems to be html output. Skip this server. - break - sys.stdout.write("%s %s %s\n" % (address, port, line)) - except urllib2.HTTPError, e: - sys.stderr.write('HTTP-Error (%s:%d): %s %s\n' % (address, port, e.code, e)) - - except Exception, e: - sys.stderr.write('Exception (%s:%d): %s\n' % (address, port, e)) diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/plesk_backups b/ansible/roles/elnappo.check_mk_agent/files/plugins/plesk_backups deleted file mode 100755 index f24369eb..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/plesk_backups +++ /dev/null @@ -1,149 +0,0 @@ -#!/usr/bin/python -# -*- encoding: utf-8; py-indent-offset: 4 -*- -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. 
- -# Monitors FTP backup spaces of plesk domains. -# Data format -# -# <<>> -# - -import MySQLdb, sys, datetime, time, os -from ftplib import FTP - -def connect(): - # Fix pylint issues in case MySQLdb is not present - # pylint: disable=no-member - try: - return MySQLdb.connect( - host = 'localhost', - db = 'psa', - user = 'admin', - passwd = file('/etc/psa/.psa.shadow').read().strip(), - charset = 'utf8', - ) - except MySQLdb.Error, e: - sys.stderr.write("MySQL-Error %d: %s\n" % (e.args[0], e.args[1])) - sys.exit(1) - -def get_domains(): - cursor = db.cursor() - cursor2 = db.cursor() - - cursor.execute('SELECT id, name FROM domains') - domains = {} - for domain_id, domain in cursor.fetchall(): - cursor2.execute('SELECT param, value FROM BackupsSettings ' - 'WHERE id = %d AND type = \'domain\'' % domain_id) - params = dict(cursor2.fetchall()) - domains[domain] = params - - cursor2.close() - cursor.close() - return domains - -# -# MAIN -# - -db = connect() - -# 1. Virtual Hosts / Domains auflisten -# 2. Backupkonfiguration herausfinden -domains = get_domains() - -# 3. Per FTP verbinden -# 4. Alter und Größe der neuesten Datei herausfinden -# 5. Größe aller Dateien in Summe herausfinden -# -# 6. Neuer Monat? -# 7. Auf FTP neues Verzeichnis anlegen: _2012 -# 8. Konfiguration in Plesk anpassen -output = ['<<>>'] -for domain, p in domains.iteritems(): - try: - if not p: - output.append('%s 4' % domain) # Backup nicht konfiguriert - continue - - ftp = FTP( - p['backup_ftp_settinghost'], - p['backup_ftp_settinglogin'], - p['backup_ftp_settingpassword'] - ) - - # Zeilen holen - files = [] - ftp.retrlines( - 'LIST %s' % p['backup_ftp_settingdirectory'], - callback = files.append - ) - # example line: - # -rw----r-- 1 b091045 cust 13660160 Dec 3 01:50 bla_v8_bla-v8.bla0.net_1212030250.tar - - # Zeilen formatieren - last_backup = None - backups = [] - for line in files: - parts = line.split() - if parts[-1].endswith('.tar'): - dt = datetime.datetime(*time.strptime(parts[-1][-14:-4], '%y%m%d%H%M')[0:5]) - backup = (parts[-1], dt, int(parts[-5])) - - if not last_backup or dt > last_backup[1]: - last_backup = backup - backups.append(backup) - - if not backups: - output.append('%s 5' % domain) # Keine Sicherungen vorhanden - continue - - # Get total size of all files on FTP - f = [] - def get_size(base_dir, l = None): - if l and l.split()[-1] in ['.', '..']: - return 0 - - size = 0 - if not l or l[0] == 'd': - subdir = l and '/' + l.split()[-1] or '' - dir_files = [] - ftp.retrlines('LIST %s%s' % (base_dir, subdir), - callback = dir_files.append - ) - for line in dir_files: - size += get_size('%s%s' % (base_dir, subdir), line) - else: - size += int(l.split()[-5]) - return size - total_size = get_size('') - - output.append('%s 0 %s %d %d' % (domain, last_backup[1].strftime('%s'), last_backup[2], total_size)) - - except Exception, e: - output.append('%s 2 %s' % (domain, e)) - -# Write cache and output -sys.stdout.write('%s\n' % '\n'.join(output)) diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/plesk_domains b/ansible/roles/elnappo.check_mk_agent/files/plugins/plesk_domains deleted file mode 100755 index 75d496da..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/plesk_domains +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/python -# -*- encoding: utf-8; py-indent-offset: 4 -*- -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | 
| __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -# Lists all domains configured in plesk -# -# <<>> -# - -import sys -import MySQLdb - -try: - db = MySQLdb.connect( - host = 'localhost', - db = 'psa', - user = 'admin', - passwd = file('/etc/psa/.psa.shadow').read().strip(), - charset = 'utf8', - ) -except MySQLdb.Error, e: # pylint: disable=no-member - sys.stderr.write("MySQL-Error %d: %s\n" % (e.args[0], e.args[1])) - sys.exit(1) - -cursor = db.cursor() -cursor.execute('SELECT name FROM domains') -sys.stdout.write('<<>>\n') -sys.stdout.write("%s\n" % '\n'.join([ d[0] for d in cursor.fetchall() ])) -cursor.close() diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/runas b/ansible/roles/elnappo.check_mk_agent/files/plugins/runas deleted file mode 100755 index 61bef912..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/runas +++ /dev/null @@ -1,84 +0,0 @@ -#!/bin/bash -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. 
- -# This plugin allows to execute mrpe, local and plugin skripts with a different user context -# It is configured with in the file $MK_CONFDIR/runas.cfg -# -# Syntax: -# [Script type] [User context] [File / Directory] -# -# Example configuration -# # Execute mrpe commands in given files under specific user -# # A '-' means no user context switch -# mrpe ab /home/ab/mrpe_commands.cfg -# mrpe lm /home/lm/mrpe_commands.cfg -# mrpe - /root/mrpe/extra_commands.cfg -# -# Excecute -executable- files in the target directories under specific user context -# plugin ab /var/ab/plugins -# local ab /var/ab/local -# - -grep -Ev '^[[:space:]]*($|#)' "$MK_CONFDIR/runas.cfg" | \ -while read type user include -do - if [ -d $include -o \( "$type" == "mrpe" -a -f $include \) ] ; then - PREFIX="" - if [ "$user" != "-" ] ; then - PREFIX="su $user -c " - fi - - # mrpe includes - if [ "$type" == "mrpe" ] ; then - echo "<<>>" - grep -Ev '^[[:space:]]*($|#)' "$include" | \ - while read descr cmdline - do - PLUGIN=${cmdline%% *} - if [ -n "$PREFIX" ] ; then - cmdline="$PREFIX\"$cmdline\"" - fi - OUTPUT=$(eval "$cmdline") - echo -n "(${PLUGIN##*/}) $descr $? $OUTPUT" | tr \\n \\1 - echo - done - # local and plugin includes - elif [ "$type" == "local" -o "$type" == "plugin" ] ; then - if [ "$type" == "local" ] ; then - echo "<<>>" - fi - find $include -executable -type f | \ - while read filename - do - if [ -n "$PREFIX" ] ; then - cmdline="$PREFIX\"$filename\"" - else - cmdline=$filename - fi - $cmdline - done - fi - fi -done diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/smart b/ansible/roles/elnappo.check_mk_agent/files/plugins/smart deleted file mode 100755 index 6e28c91f..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/smart +++ /dev/null @@ -1,179 +0,0 @@ -#!/bin/bash -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - - - - -# This will be called on LSI based raidcontrollers and accesses -# the SMART data of SATA disks attached to a SAS Raid HBA via -# SCSI protocol interface. 
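Put concretely: for the sample values shown in the comments of the loop below (Enclosure Device ID 252, Slot Number 7, Device Id 19, PD Type SATA), megaraid_info ends up running roughly smartctl -d megaraid,19 -v 9,raw48 -A /dev/sg0 and prefixes every always-updated SMART attribute line with Enc252/Slot7 plus the PD type and inquiry model, so each physical disk behind the RAID HBA still appears as its own line in the agent section.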
-megaraid_info() -{ - #PDINFO=$(MegaCli -PDlist -a0) - if [ -z "$1" ]; then - PDINFO=$(megacli -PDlist -a0 -NoLog) - else - PDINFO=$($1 -PDlist -a0 -NoLog) - fi - - echo "$PDINFO" | \ - while read line ; do - case "$line" in - # FIRST LINE - "Enclosure Device ID"*) #Enclosure Device ID: 252 - ENC=$( echo "$line" | awk '{print $4}') - unset SLOT LOG_DEV_ID VEND MODEL - ;; - "Slot Number"*) #Slot Number: 7 - SLOT=$( echo "$line" | awk '{print $3}') - ;; - # Identify the logical device ID. smartctl needs it to access the disk. - "Device Id"*) #Device Id: 19 - LOG_DEV_ID=$( echo "$line" | awk '{print $3}') - ;; - "PD Type"*) #PD Type: SATA - VEND=$( echo "$line" | awk '{print $3}') - ;; - # This is the last value, generate output here - "Inquiry Data"*) - #Inquiry Data: WD-WCC1T1035197WDC WD20EZRX-00DC0B0 80.00A80 - # $4 seems to be better for some vendors... wont be possible to get this perfect. - MODEL=$( echo "$line" | awk '{print $3}') - - # /dev/sdc ATA SAMSUNG_SSD_830 5 Reallocated_Sector_Ct 0x0033 100 100 010 Pre-fail Always - - smartctl -d megaraid,${LOG_DEV_ID} -v 9,raw48 -A /dev/sg0 | \ - grep Always | egrep -v '^190(.*)Temperature(.*)' | \ - sed "s|^|Enc${ENC}/Slot${SLOT} $VEND $MODEL |" - ;; - esac - done -} - - -# Only handle always updated values, add device path and vendor/model -if which smartctl > /dev/null 2>&1 ; then - # - # if the 3ware-utility is found - # get the serials for all disks on the controller - # - if which tw_cli > /dev/null 2>&1 ; then - # support for only one controller at the moment - TWAC=$(tw_cli show | awk 'NR < 4 { next } { print $1 }' | head -n 1) - - # - add a trailing zero to handle case of unused slot - # trailing zeros are part of the device links in /dev/disk/by-id/... anyway - # - only the last 9 chars seem to be relevant - # (hopefully all this doesn't change with new kernels...) - eval `tw_cli /$TWAC show drivestatus | grep -E '^p[0-9]' | awk '{print $1 " " $7 "0"}' | while read twaminor serial ; do - twaminor=${twaminor#p} - serial=${serial:(-9)} - serial=AMCC_${serial}00000000000 - echo "$serial=$twaminor" - done` - else: - echo "tw_cli not found" >&2 - fi - - echo '<<>>' - SEEN= - for D in /dev/disk/by-id/{scsi,ata}-*; do - [ "$D" != "${D%scsi-\*}" ] && continue - [ "$D" != "${D%ata-\*}" ] && continue - [ "$D" != "${D%-part*}" ] && continue - N=$(readlink $D) - N=${N##*/} - if [ -r /sys/block/$N/device/vendor ]; then - VEND=$(tr -d ' ' < /sys/block/$N/device/vendor) - else - # 2012-01-25 Stefan Kaerst CDJ - in case $N does not exist - VEND=ATA - fi - if [ -r /sys/block/$N/device/model ]; then - MODEL=$(sed -e 's/ /_/g' -e 's/_*$//g' < /sys/block/$N/device/model) - else - MODEL=$(smartctl -a $D | grep -i "device model" | sed -e "s/.*:[ ]*//g" -e "s/\ /_/g") - fi - # Excluded disk models for SAN arrays or certain RAID luns that are also not usable.. - if [ "$MODEL" = "iSCSI_Disk" -o "$MODEL" = "LOGICAL_VOLUME" ]; then - continue - fi - - # Avoid duplicate entries for same device - if [ "${SEEN//.$N./}" != "$SEEN" ] ; then - continue - fi - SEEN="$SEEN.$N." 
- - # strip device name for final output - DNAME=${D#/dev/disk/by-id/scsi-} - DNAME=${DNAME#/dev/disk/by-id/ata-} - # 2012-01-25 Stefan Kaerst CDJ - special option in case vendor is AMCC - CMD= - if [ "$VEND" == "AMCC" -a -n "$TWAC" ]; then - DNAME=${DNAME#1} - [ -z "${!DNAME}" ] && continue - CMD="smartctl -d 3ware,${!DNAME} -v 9,raw48 -A /dev/twa0" - # create nice device name including model - MODEL=$(tw_cli /$TWAC/p${!DNAME} show model | head -n 1 | awk -F= '{ print $2 }') - MODEL=${MODEL## } - MODEL=${MODEL// /-} - DNAME=${DNAME#AMCC_} - DNAME="AMCC_${MODEL}_${DNAME%000000000000}" - elif [ "$VEND" != "ATA" ] ; then - TEMP= - # create temperature output as expected by checks/smart - # this is a hack, TODO: change checks/smart to support SCSI-disks - eval `smartctl -d scsi -i -A $D | while read a b c d e ; do - [ "$a" == Serial ] && echo SN=$c - [ "$a" == Current -a "$b" == Drive -a "$c" == Temperature: ] && echo TEMP=$d - done` - [ -n "$TEMP" ] && CMD="echo 194 Temperature_Celsius 0x0000 000 000 000 Old_age Always - $TEMP (0 0 0 0)" - DNAME="${VEND}_${MODEL}_${SN}" - else - CMD="smartctl -d ata -v 9,raw48 -A $D" - fi - - [ -n "$CMD" ] && $CMD | grep Always | egrep -v '^190(.*)Temperature(.*)' | sed "s|^|$DNAME $VEND $MODEL |" - done 2>/dev/null - - - # Call MegaRaid submodule if conditions are met - if type MegaCli >/dev/null 2>&1; then - MegaCli_bin="MegaCli" - elif type MegaCli64 >/dev/null 2>&1; then - MegaCli_bin="MegaCli64" - elif type megacli >/dev/null 2>&1; then - MegaCli_bin="megacli" - else - MegaCli_bin="unknown" - fi - - if [ "$MegaCli_bin" != "unknown" ]; then - megaraid_info "$MegaCli_bin" - fi -else - echo "ERROR: smartctl not found" >&2 -fi - diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/symantec_av b/ansible/roles/elnappo.check_mk_agent/files/plugins/symantec_av deleted file mode 100755 index f5419941..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/symantec_av +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/sh -# -*- encoding: utf-8; py-indent-offset: 4 -*- -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. 
- -if [ -f /opt/Symantec/symantec_antivirus/sav ] -then - echo "<<>>" - /opt/Symantec/symantec_antivirus/sav info -d - - echo "<<>>" - /opt/Symantec/symantec_antivirus/sav info -a - - echo "<<>>" - /opt/Symantec/symantec_antivirus/sav quarantine -l -fi - diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/unitrends_backup b/ansible/roles/elnappo.check_mk_agent/files/plugins/unitrends_backup deleted file mode 100755 index 10603801..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/unitrends_backup +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/php ->>\n"; -$conn = "port=5432 dbname=bpdb user=postgres"; -$db = pg_connect($conn); - -$query = "SELECT - schedule_id, a.type AS app_type - FROM - bp.schedules AS s - JOIN - bp.application_lookup AS a USING(app_id) - WHERE - enabled=true AND email_report=true - ORDER BY s.name"; -$res = pg_query($db, $query); - -$start = time() - (24 * 3600); -$in = array("start_time" => $start); -bp_bypass_cookie(3, 'schedule_report'); - -while ($obj = pg_fetch_object($res)) { - if ($obj->app_type == "Archive") - continue; - - $in["schedule_id"] = (int)$obj->schedule_id; - $ret = bp_get_schedule_history($in); - if (empty($ret[0]["backups"])) - continue; - - print "HEADER|". - $ret[0]["schedule_name"]."|" . - $ret[0]["application_name"]."|". - $ret[0]["schedule_description"]."|". - $ret[0]["failures"]."\n"; - - foreach($ret[0]["backups"] as $trash => $backup) { - foreach($backup as $row) { - - $name = $row["primary_name"]; - switch($ret[0]["app_type"]){ - case "SQL Server": - $name .= "/".$row["secondary_name"]; - break; - - case "VMware": - $name .= ", VM ".$row["secondary_name"]; - break; - } - - $backup_type = $row["type"]; - - if (!isset($name)) - $name = $backup_tyoe; - - $backup_no = (isset($row["backup_id"])) ? $row["backup_id"] : "N/A" ; - - print "$name|$backup_no|$backup_type|".$row['description']."\n"; - } - } -} -pg_free_result($res); -bp_destroy_cookie(); -?> diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/unitrends_replication b/ansible/roles/elnappo.check_mk_agent/files/plugins/unitrends_replication deleted file mode 100755 index 1efcf846..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/unitrends_replication +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/python -# -*- encoding: utf-8; py-indent-offset: 4 -*- -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. 
- -import sys, time, urllib -from xml.dom import minidom -now = int(time.time()) -start = now - 24 * 60 * 60 -end = now -dpu = 1 - -url = "http://localhost/recoveryconsole/bpl/syncstatus.php?type=replicate&arguments=start:%s,end:%s&sid=%s&auth=1:" % ( start, end, dpu ) -xml = urllib.urlopen(url) - -sys.stdout.write("<<>>\n") -dom = minidom.parse(xml) -for item in dom.getElementsByTagName('SecureSyncStatus'): - application = item.getElementsByTagName('Application') - if application: - application = application[0].attributes['Name'].value - else: - application = "N/A" - result = item.getElementsByTagName('Result')[0].firstChild.data - completed = item.getElementsByTagName('Complete')[0].firstChild.data - targetname = item.getElementsByTagName('TargetName')[0].firstChild.data - instancename = item.getElementsByTagName('InstanceName')[0].firstChild.data - sys.stdout.write("%s|%s|%s|%s|%s\n" % - (application, result, completed, targetname, instancename)) diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/vxvm b/ansible/roles/elnappo.check_mk_agent/files/plugins/vxvm deleted file mode 100755 index 5fe2dec8..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/vxvm +++ /dev/null @@ -1,50 +0,0 @@ -#!/bin/sh -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -# This plugin has been tested on Linux and HPUX. - - -if type vxdmpadm >/dev/null 2>&1; then - echo '<<>>' - vxdmpadm listenclosure all | grep -v -w -e ^[dD]isk -e ^other_disks -e ^ENCLR_NAME -e \^= - echo '<<>>' - ENCS=$( vxdmpadm listenclosure all | grep -v -w -e ^[dD]isk -e ENCLR_NAME -e \^= | awk '{print $1}') - - echo "$ENCS" | while read enc ; do - vxdmpadm getdmpnode enclosure=$enc | grep -v -e \^= -e NAME - done -fi - -if type vxdg >/dev/null 2>&1; then - echo '<<>>' - # Get a list of the in-use disk groups. 
- DGS=$(vxdg list | grep enabled | awk '{print $1}') - # Deported or otherwise inactive disk groups need no performance monitoring - if [ "X${DGS}" != "X" ]; then - for DG in $DGS ; do - vxprint -g $DG -v -q -Q -F "%type %dgname %name %admin_state %kstate" - done - fi -fi diff --git a/ansible/roles/elnappo.check_mk_agent/files/plugins/websphere_mq b/ansible/roles/elnappo.check_mk_agent/files/plugins/websphere_mq deleted file mode 100755 index 1f2dc622..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/plugins/websphere_mq +++ /dev/null @@ -1,121 +0,0 @@ -#!/bin/sh -# Monitor Websphere MQ -# WWI Version 18.05.2016 -# +------------------------------------------------------------------+ -# | ____ _ _ __ __ _ __ | -# | / ___| |__ ___ ___| | __ | \/ | |/ / | -# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | -# | | |___| | | | __/ (__| < | | | | . \ | -# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | -# | | -# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | -# +------------------------------------------------------------------+ -# -# This file is part of Check_MK. -# The official homepage is at http://mathias-kettner.de/check_mk. -# -# check_mk is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation in version 2. check_mk is distributed -# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- -# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more de- -# tails. You should have received a copy of the GNU General Public -# License along with GNU Make; see the file COPYING. If not, write -# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, -# Boston, MA 02110-1301 USA. - -# plugin for websphere_mq_* checks - -if [ "$1" = "" ] -then - # wwi -------------------------------------- - # The "-" (login shell) option was removed from the su call below: - # with "su - mqm", the AIX environment prints the message "[YOU HAVE NEW MAIL]" - # before the first section header of this plugin, which corrupts the main agent's - # "TCP Connection" section and makes the following section useless for the OMD server check.
- # su - mqm -c "/usr/lib/check_mk_agent/plugins/websphere_mq run" - su mqm -c "/usr/lib/check_mk_agent/plugins/websphere_mq run" - -else - # Loop over all local mq instances - for QM in $( ps -ef|grep [/]usr/mqm/bin/runmqlsr|awk -v FS="-m" '{print $2}'|awk '{print $1}'| uniq) - do - echo '<<>>' - for i in `echo " display CHANNEL (*) TYPE (SDR) " | /usr/bin/runmqsc $QM | grep CHLTYPE | \ - grep -v SYSTEM | awk '{print $1}'`; do - - j=`echo "display $i " | /usr/bin/runmqsc $QM | grep XMITQ | tr " " "\n" | \ - grep XMITQ | sed '1,$s/(/ /g' | sed '1,$s/)/ /g'| awk '{print $2 }'` - - a=`echo " display qlocal ($j) CURDEPTH " | /usr/bin/runmqsc $QM | grep CURDEPTH | \ - tr " " "\n" | grep CURDEPTH | sed '1,$s/(/ /g' | sed '1,$s/)/ /g'| \ - awk '{print $2 }' | tr "\n" " "` - - c=`echo " display qlocal ($j) MAXDEPTH " | /usr/bin/runmqsc $QM | grep MAXDEPTH | \ - tr " " "\n" | grep MAXDEPTH | sed '1,$s/(/ /g' | sed '1,$s/)/ /g'| \ - awk '{print $2 }' | tr "\n" " "` - - l=`echo $i | sed '1,$s/(/ /g' | sed '1,$s/)/ /g'| awk '{print $2 }'` - - s=`echo " display chstatus($l)" | /usr/bin/runmqsc $QM | grep STATUS | tail -1 | \ - sed '1,$s/(/ /g' | sed '1,$s/)/ /g'| awk '{print $NF }'` - - if [ "$s" = "" ] - then - s="Unknown" - fi - echo "$a $i $c $s" - done - - echo '<<>>' - for t in `echo " display queue (*) where (USAGE EQ NORMAL) " | /usr/bin/runmqsc $QM | \ - grep -v SYSTEM | grep -v MQMON | grep -v MONITOR | grep -v _T0 | grep -v _T1 | \ - grep -v _T2 | grep -v _T3 | grep -v mqtest | grep QUEUE | awk '{ print $1 }' | \ - sed '1,$s/(/ /g' | sed '1,$s/)/ /g'| awk '{print $2 }'`; do - - # wwi MQ admin change, to get more queues which are needed - a=`echo " display qlocal ($t) CURDEPTH " | /usr/bin/runmqsc $QM | grep CURDEPTH | \ - tr " " "\n" | grep CURDEPTH | sed '1,$s/(/ /g' | sed '1,$s/)/ /g'| \ - awk '{print $2 }' | tr "\n" " "` - - b=`echo " display qlocal ($t) MAXDEPTH " | /usr/bin/runmqsc $QM | grep MAXDEPTH | \ - tr " " "\n" | grep MAXDEPTH | sed '1,$s/(/ /g' | sed '1,$s/)/ /g'| \ - awk '{print $2 }' | tr "\n" " "` - - c=`echo " dis qs($t) monitor " | /usr/bin/runmqsc $QM | grep -e LGETDATE -e LGETTIME | \ - tr '\n' ' '| awk -v FS="LPUTDATE" '{print $1}'|sed 's/ //g'` - - NOW=$(date +%Y_%m_%d"-"%H_%M_%S) - - # Muster: Anzahl eingehender Messages $a auf $t Max-Queues $b - # wwi ------------------------- - - if [ "$a" == "" ] || [ "$a" == " " ]; then - a=" 0" - t=" $t" - c="CURDEPTH(0)LGETDATE()LGETTIME()" - fi - - if [ "$b" == "" ]|| [ "$b" == " " ]; then - b=0 - c="CURDEPTH(0)LGETDATE()LGETTIME()" - fi - - if [ "$c" == "" ]|| [ "$c" == " " ]; then - c="CURDEPTH(0)LGETDATE()LGETTIME()" - fi - - echo "$a $t $b $c $NOW" - done # for t - done # for QM -fi - - -if type dspmq > /dev/null; then - echo "<<>>" - dspmq -x - echo "MQv"$(dspmqver|grep -e Version -e ^Level -e Mode|awk -v FS=":" '{print $2","}'|tr -d ' '|tr -d '\n'|sed 's/,$/;/g') - dspmq -o all -fi diff --git a/ansible/roles/elnappo.check_mk_agent/files/sudoers_check_mk_agent b/ansible/roles/elnappo.check_mk_agent/files/sudoers_check_mk_agent deleted file mode 100644 index dd85ce35..00000000 --- a/ansible/roles/elnappo.check_mk_agent/files/sudoers_check_mk_agent +++ /dev/null @@ -1 +0,0 @@ -checkmk_agent ALL = (root) NOPASSWD: /usr/bin/check_mk_agent diff --git a/ansible/roles/elnappo.check_mk_agent/handlers/main.yml b/ansible/roles/elnappo.check_mk_agent/handlers/main.yml deleted file mode 100644 index 4b6adb40..00000000 --- a/ansible/roles/elnappo.check_mk_agent/handlers/main.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -# handlers file for 
check_mk agent -- name: Restart firewalld - service: - name: firewalld - state: restarted - -- name: Check_mk activate changes via WATO API - check_mk: - server_url: "{{ check_mk_agent_monitoring_host_url }}" - username: "{{ check_mk_agent_monitoring_host_wato_username }}" - secret: "{{ check_mk_agent_monitoring_host_wato_secret }}" - activate_changes: true - delegate_to: localhost diff --git a/ansible/roles/elnappo.check_mk_agent/library/check_mk.py b/ansible/roles/elnappo.check_mk_agent/library/check_mk.py deleted file mode 100644 index 177874ff..00000000 --- a/ansible/roles/elnappo.check_mk_agent/library/check_mk.py +++ /dev/null @@ -1,249 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - - -ANSIBLE_METADATA = {'status': ['preview'], - 'supported_by': 'community', - 'version': '0.1'} - -DOCUMENTATION = ''' ---- -module: check_mk -short_description: Talk to check_mk API -description: - - Used to add, edit, and delete hosts via check_mk web API. - - Service discovery and changeset activation is also implemented. -version_added: "0.2" -author: "Fabian Weisshaar (@elnappo)" -options: - server_url: - description: - - URL of check_mk server, with protocol (http or https). - required: true - default: null - - username: - description: - - Check_mk username, used to authenticate against the server. - required: true - default: null - - secret: - description: - - Check_mk user secret. - required: true - default: null - - hostname: - description: - - Name of the host in check_mk. - required: false - default: null - - folder: - description: - - WATO folder in which the host is created; the module lowercases the given path. - required: false - default: "" - - state: - description: - - Whether the host should be present in or absent from check_mk. - required: false - default: present - choices: - - present - - absent - - discover_services: - description: - - Run a service discovery for the host using the given mode. - required: false - default: null - choices: - - new - - remove - - fixall - - refresh - - activate_changes: - description: - - Activate pending changes in check_mk after this task. - required: false - default: no - - attributes: - description: - - Dictionary of WATO host attributes to set on the host (e.g. alias, ipaddress). - required: false - default: {} - - validate_certs: - description: - - Whether to verify the server's SSL certificate. - required: false - default: True - -notes: - - The module talks to the WATO web API endpoint check_mk/webapi.py on the monitoring server.
- -requirements: - requests >= 2.5.0 -''' - -EXAMPLES = ''' -- name: Add host to monitoring - check_mk: - hostname: "{{ inventory_hostname }}" - folder: os/linux - state: present - delegate_to: localhost - notify: check_mk activate changes - -- name: Add host to monitoring and discover services - check_mk: - hostname: "{{ inventory_hostname }}" - folder: dfd-inf - discover_services: refresh - state: present - delegate_to: localhost - notify: "check_mk activate changes" - -- name: Remove host from monitoring - check_mk: - hostname: "{{ inventory_hostname }}" - state: absent - delegate_to: localhost - notify: "check_mk activate changes" - -handlers: - - name: check_mk activate changes - check_mk: activate_changes=all -''' - -RETURN = ''' -changed: - description: whether the host or its service configuration was changed in check_mk - returned: always - type: bool - sample: true -''' -from ansible.module_utils.basic import AnsibleModule -from distutils.version import LooseVersion -import json - -try: - import requests - - REQUESTS_FOUND = True -except ImportError: - REQUESTS_FOUND = False - - -class CheckMKAPI(object): - def __init__(self, ansible_module): - self._module = ansible_module - self._api_url = self._module.params["server_url"] + "check_mk/webapi.py?_username=%s&_secret=%s" % (self._module.params["username"], self._module.params["secret"]) - self._session = requests.Session() - - if not self._module.params["server_url"].endswith("/"): - self._module.fail_json(msg="Server URL must end with / e.g. http://cmk.example.com/monitoring/") - - def _api_request(self, action, payload=None, fail_on_error=True): - try: - r = self._session.post(self._api_url + action, data=payload or {}, verify=self._module.params["validate_certs"]) - r.raise_for_status() - if r.json()["result_code"] != 0 and fail_on_error: - self._module.fail_json(msg=r.json()["result"]) - return r.json()["result"] - except getattr(json.decoder, 'JSONDecodeError', ValueError): - self._module.fail_json(msg=r.text, http_status_code=r.status_code, payload=payload) - except requests.exceptions.RequestException as err: - self._module.fail_json(msg=str(err), payload=payload) - - def get_host_attributes(self, hostname): - return self._api_request("&action=get_host&effective_attributes=1", {'hostname': hostname}) - - def add_host(self, hostname, folder, attributes=None): - payload = {'hostname': hostname, "folder": folder.lower(), 'attributes': attributes or {}} - return self._api_request("&action=add_host", "request="+json.dumps(payload)) - - def edit_host(self, hostname, attributes=None, unset_attributes=None): - payload = {"attributes": attributes, "hostname": hostname, "unset_attributes": unset_attributes or []} - return self._api_request("&action=edit_host","request="+json.dumps(payload)) - - def delete_host(self, hostname): - return self._api_request("&action=delete_host", {'hostname': hostname}) - - def discover_services(self, hostname, mode="new"): - return self._api_request("&action=discover_services&mode=%s" % mode, {'hostname': hostname}) - - def activate_changes(self, mode="dirty"): - return self._api_request("&action=activate_changes&mode=%s" % mode) - - def host_exists(self, hostname): - return self._api_request("&action=get_host&effective_attributes=1", {'hostname': hostname}, - False) != "Check_MK exception: No such host" - - -def main(): - argument_spec = dict( - server_url=dict(type="str", required=True), - username=dict(type="str", required=True, ), - secret=dict(type="str", required=True, no_log=True), - - hostname=dict(type="str"), - 
folder=dict(type="str", default=""), - attributes=dict(type="dict", default={}), - state=dict(type="str", choices=['present', 'absent'], default="present"), - validate_certs=dict(type="bool", default=True), - - discover_services=dict(type="str", choices=['new', 'remove', 'fixall', 'refresh']), - activate_changes=dict(type="bool") - ) - a_module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) - - if not REQUESTS_FOUND: - a_module.fail_json(msg='requests library is required for this module') - - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - a_module.fail_json(msg='requests library version should be >= 2.5.0') - - cmk = CheckMKAPI(a_module) - result = dict(changed=False) - - # add / delete host - if a_module.params["hostname"]: - host_exists = cmk.host_exists(a_module.params["hostname"]) - - if a_module.params["state"] == "present" and not host_exists: - result["changed"] = True - result["addhost"] = cmk.add_host(a_module.params["hostname"], a_module.params["folder"], a_module.params["attributes"]) - - if a_module.params["state"] == "absent" and host_exists: - result["changed"] = True - cmk.delete_host(a_module.params["hostname"]) - - # Adjust attributes - if a_module.params["hostname"] and host_exists and a_module.params["attributes"]: - result["changed"] = True - result["edit_host"] = cmk.edit_host(a_module.params["hostname"], a_module.params["attributes"]) - - # discover services - if a_module.params["discover_services"]: - if not a_module.params["hostname"]: - a_module.fail_json(msg='Hostname is required when using discover_services') - - result["changed"] = True - result["discover_services"] = cmk.discover_services(a_module.params["hostname"], a_module.params["discover_services"]) - - # activate changes - if a_module.params["activate_changes"]: - if result["changed"] == True: - result["activate_changes"] = cmk.activate_changes() - - a_module.exit_json(**result) - - -if __name__ == '__main__': - main() diff --git a/ansible/roles/elnappo.check_mk_agent/meta/.galaxy_install_info b/ansible/roles/elnappo.check_mk_agent/meta/.galaxy_install_info deleted file mode 100644 index c79b3434..00000000 --- a/ansible/roles/elnappo.check_mk_agent/meta/.galaxy_install_info +++ /dev/null @@ -1 +0,0 @@ -{install_date: 'Tue Apr 23 12:16:07 2019', version: v2.2.0} diff --git a/ansible/roles/elnappo.check_mk_agent/meta/main.yml b/ansible/roles/elnappo.check_mk_agent/meta/main.yml deleted file mode 100644 index 493812e7..00000000 --- a/ansible/roles/elnappo.check_mk_agent/meta/main.yml +++ /dev/null @@ -1,24 +0,0 @@ ---- -galaxy_info: - author: elnappo - description: Install check_mk agent - license: MIT - min_ansible_version: 2.3 - - platforms: - - name: Ubuntu - versions: - - xenial - - bionic - - name: Debian - versions: - - jessie - - stretch - - name: EL - versions: - - 7 - - galaxy_tags: - - monitoring - -dependencies: [] diff --git a/ansible/roles/elnappo.check_mk_agent/molecule/default/INSTALL.rst b/ansible/roles/elnappo.check_mk_agent/molecule/default/INSTALL.rst deleted file mode 100644 index b42edf5f..00000000 --- a/ansible/roles/elnappo.check_mk_agent/molecule/default/INSTALL.rst +++ /dev/null @@ -1,16 +0,0 @@ -******* -Docker driver installation guide -******* - -Requirements -============ - -* General molecule dependencies (see https://molecule.readthedocs.io/en/latest/installation.html) -* Docker Engine -* docker-py -* docker - -Install -======= - - $ sudo pip install docker-py diff --git 
a/ansible/roles/elnappo.check_mk_agent/molecule/default/molecule.yml b/ansible/roles/elnappo.check_mk_agent/molecule/default/molecule.yml deleted file mode 100644 index 73e01c59..00000000 --- a/ansible/roles/elnappo.check_mk_agent/molecule/default/molecule.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -dependency: - name: galaxy -driver: - name: docker -lint: - name: yamllint -platforms: - - name: instance - image: "geerlingguy/docker-${MOLECULE_DISTRO:-ubuntu1804}-ansible:latest" - command: ${MOLECULE_DOCKER_COMMAND:-""} - volumes: - - /sys/fs/cgroup:/sys/fs/cgroup:ro - privileged: true - pre_build_image: true -provisioner: - name: ansible - playbooks: - prepare: prepare.yml - lint: - name: ansible-lint -scenario: - name: default -verifier: - name: testinfra - lint: - name: flake8 diff --git a/ansible/roles/elnappo.check_mk_agent/molecule/default/playbook.yml b/ansible/roles/elnappo.check_mk_agent/molecule/default/playbook.yml deleted file mode 100644 index c11c3a00..00000000 --- a/ansible/roles/elnappo.check_mk_agent/molecule/default/playbook.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- -- name: Converge - hosts: all - pre_tasks: - - name: Install check_mk_agent > v1.4 - apt: - deb: "http://dns.lihas.de/debian/packages.buster/check-mk-agent_1.5.0p12-1_all.deb" - when: ansible_os_family == 'Debian' - - name: Install check_mk_agent > v1.4 - package: - name: check-mk-agent - when: ansible_os_family != 'Debian' - - roles: - - role: ansible-role-check-mk-agent - check_mk_agent_over_ssh: false - check_mk_agent_plugins_requirements: ["smartmontools"] - check_mk_agent_plugins: ["smart"] - check_mk_agent_manual_install: true diff --git a/ansible/roles/elnappo.check_mk_agent/molecule/default/prepare.yml b/ansible/roles/elnappo.check_mk_agent/molecule/default/prepare.yml deleted file mode 100644 index caf68512..00000000 --- a/ansible/roles/elnappo.check_mk_agent/molecule/default/prepare.yml +++ /dev/null @@ -1,43 +0,0 @@ ---- -- name: Prepare - hosts: all - tasks: - - name: Retrieve new lists of packages and performs an upgrade - apt: - update_cache: true - upgrade: dist - autoremove: true - autoclean: true - cache_valid_time: 3600 - when: ansible_os_family == 'Debian' - - - name: Retrieve new lists of packages and performs an upgrade - pacman: - update_cache: true - upgrade: true - when: ansible_os_family == 'Archlinux' - - - name: Retrieve new lists of packages and performs an upgrade - yum: - name: '*' - update_cache: true - state: latest - when: ansible_os_family == 'RedHat' - - - name: Install dependencies - package: - name: "{{ item }}" - state: present - loop: - - ufw - - net-tools - when: ansible_os_family == 'Debian' - - - name: Install dependencies - package: - name: "{{ item }}" - state: present - loop: - - firewalld - - net-tools - when: ansible_os_family == 'RedHat' diff --git a/ansible/roles/elnappo.check_mk_agent/molecule/default/tests/test_default.py b/ansible/roles/elnappo.check_mk_agent/molecule/default/tests/test_default.py deleted file mode 100644 index 847da7c4..00000000 --- a/ansible/roles/elnappo.check_mk_agent/molecule/default/tests/test_default.py +++ /dev/null @@ -1,16 +0,0 @@ -import os - -import testinfra.utils.ansible_runner - -testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( - os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') - - -def test_check_mk_agent_server_is_installed(host): - check_mk_agent = host.package('check-mk-agent') - - assert check_mk_agent.is_installed - - -def test_check_mk_agent_socket_is_listening(host): - assert 
host.socket("tcp://0.0.0.0:6556").is_listening diff --git a/ansible/roles/elnappo.check_mk_agent/tasks/main.yml b/ansible/roles/elnappo.check_mk_agent/tasks/main.yml deleted file mode 100644 index a5e16166..00000000 --- a/ansible/roles/elnappo.check_mk_agent/tasks/main.yml +++ /dev/null @@ -1,152 +0,0 @@ ---- -# tasks file for check_mk agent -- name: Install check_mk_agent - package: - name: check-mk-agent - state: present - when: not check_mk_agent_manual_install - -- name: Install plugin requirements - package: - name: "{{ item }}" - state: present - with_items: "{{ check_mk_agent_plugins_requirements }}" - -- name: Create plugins repository - file: - path: /usr/lib/check_mk_agent/plugins/ - owner: root - group: root - state: directory - -- name: Copy plugins - copy: - src: plugins/{{ item }} - dest: /usr/lib/check_mk_agent/plugins/{{ item }} - owner: root - group: root - mode: 0755 - with_items: "{{ check_mk_agent_plugins }}" - -- name: Create cache time directories - file: - name: /usr/lib/check_mk_agent/local/{{ item.value.cache_time }} - state: directory - owner: root - group: root - mode: 0755 - with_dict: "{{ check_mk_agent_local_checks }}" - -- name: Copy local checks - copy: - src: "{{ item.value.src }}" - dest: /usr/lib/check_mk_agent/local/{{ item.value.cache_time | default(omit) }}/{{ item.key }} - owner: root - group: root - mode: 0755 - with_dict: "{{ check_mk_agent_local_checks }}" - -- name: systemd socket activation - block: - - name: Start and enable check_mk.socket (use systemd-socket) - systemd: - name: check_mk.socket - daemon_reload: true - state: started - enabled: true - - - name: Allow check_mk.socket (ufw) - ufw: - rule: allow - port: 6556 - proto: tcp - when: check_mk_agent_setup_firewall and ansible_os_family == "Debian" - - - name: Allow check_mk.socket (firewalld) - firewalld: - port: 6556/tcp - zone: public - permanent: true - state: enabled - when: check_mk_agent_setup_firewall and ansible_os_family == "RedHat" - notify: - - Restart firewalld - when: not check_mk_agent_over_ssh - -- name: Setup SSH key - authorized_key: - user: root - key_options: 'command="/usr/bin/check_mk_agent",no-pty,no-agent-forwarding,no-port-forwarding,no-X11-forwarding,no-user-rc' - key: "{{ lookup('file', check_mk_agent_pubkey_file) }}" - when: check_mk_agent_over_ssh and check_mk_agent_pubkey_file and not check_mk_agent_with_sudo - -- name: check_mk_agent with sudo - block: - - name: Add check_mk user for use with sudo - user: - name: checkmk_agent - system: true - home: /usr/lib/check_mk_agent/local - createhome: false - state: present - - - name: Allow checkmk_agent user to run /usr/bin/check_mk_agent with sudo - copy: - src: sudoers_check_mk_agent - dest: /etc/sudoers.d/check_mk_agent - - - name: Setup SSH key with sudo - authorized_key: - user: checkmk_agent - key_options: 'command="sudo /usr/bin/check_mk_agent",no-pty,no-agent-forwarding,no-port-forwarding,no-X11-forwarding,no-user-rc' - key: "{{ lookup('file', check_mk_agent_pubkey_file) }}" - when: check_mk_agent_over_ssh and check_mk_agent_pubkey_file and check_mk_agent_with_sudo - -- name: Add SSH host key - block: - - name: Scan SSH host pubkey - shell: ssh-keyscan -T 10 {{ inventory_hostname }} - changed_when: false - register: check_mk_agent_host_ssh_pubkey - tags: - - skip_ansible_lint - - - name: Add known_host entry to monitoring instance - known_hosts: - name: "{{ inventory_hostname }}" - key: "{{ item }}" - state: present - with_items: "{{ check_mk_agent_host_ssh_pubkey.stdout_lines }}" - - when: 
check_mk_agent_over_ssh and check_mk_agent_add_host_pubkey - delegate_to: "{{ check_mk_agent_monitoring_host }}" - become_user: "{{ check_mk_agent_monitoring_user }}" - become: true - -- name: Add host to check_mk instance via WATO API - check_mk: - server_url: "{{ check_mk_agent_monitoring_host_url }}" - username: "{{ check_mk_agent_monitoring_host_wato_username }}" - secret: "{{ check_mk_agent_monitoring_host_wato_secret }}" - hostname: "{{ inventory_hostname }}" - folder: "{{ check_mk_agent_monitoring_host_folder }}" - state: present - when: check_mk_agent_add_to_wato - register: check_mk_agent_add_host_wato - delegate_to: localhost - tags: - - skip_ansible_lint - -- name: Discover services via WATO API - check_mk: - server_url: "{{ check_mk_agent_monitoring_host_url }}" - username: "{{ check_mk_agent_monitoring_host_wato_username }}" - secret: "{{ check_mk_agent_monitoring_host_wato_secret }}" - hostname: "{{ inventory_hostname }}" - discover_services: "{{ check_mk_agent_monitoring_host_discovery_mode }}" - when: check_mk_agent_add_host_wato.changed - notify: - - Check_mk activate changes via WATO API - delegate_to: localhost - tags: - - skip_ansible_lint diff --git a/ansible/roles/elnappo.check_mk_agent/test.yml b/ansible/roles/elnappo.check_mk_agent/test.yml deleted file mode 100644 index b9a16645..00000000 --- a/ansible/roles/elnappo.check_mk_agent/test.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -- hosts: localhost - remote_user: root - roles: - - { role: ansible-role-check-mk-agent } diff --git a/ansible/roles/k8s/files/check-mk-agent-socket.unit b/ansible/roles/k8s/files/check-mk-agent-socket.unit deleted file mode 100644 index 0e15ba42..00000000 --- a/ansible/roles/k8s/files/check-mk-agent-socket.unit +++ /dev/null @@ -1,10 +0,0 @@ -# systemd socket definition file -[Unit] -Description=Check_MK Agent Socket - -[Socket] -ListenStream=6556 -Accept=true - -[Install] -WantedBy=sockets.target diff --git a/ansible/roles/k8s/files/check-mk-service.unit b/ansible/roles/k8s/files/check-mk-service.unit deleted file mode 100644 index a721813e..00000000 --- a/ansible/roles/k8s/files/check-mk-service.unit +++ /dev/null @@ -1,12 +0,0 @@ -# systemd service definition file -[Unit] -Description=Check_MK - -[Service] -ExecStart=/usr/bin/check_mk_agent -KillMode=process - -User=root -Group=root - -StandardInput=socket diff --git a/ansible/roles/k8s/tasks/main.yml b/ansible/roles/k8s/tasks/main.yml deleted file mode 100644 index 790ead12..00000000 --- a/ansible/roles/k8s/tasks/main.yml +++ /dev/null @@ -1,51 +0,0 @@ ---- -- name: Update all packages to the latest version - apt: - update_cache: yes - upgrade: dist -- name: Install packages - apt: - name: "{{ packages }}" - vars: - packages: - - zsh - - iotop - - latencytop - - apt-transport-https - - ca-certificates - - curl - - software-properties-common - - check-mk-agent - -- name: Add Docker GPG key - apt_key: url=https://download.docker.com/linux/ubuntu/gpg - -- name: Add Docker APT repository - apt_repository: - repo: deb [arch=amd64] https://download.docker.com/linux/ubuntu {{ansible_distribution_release}} stable - -- name: Install list of packages - apt: - name: docker-ce - state: present - update_cache: yes - -- name: Add ubuntu to docker group - user: - name: ubuntu - groups: docker - append: yes - -- name: Add checkmk socket activation - copy: - src: ../files/check-mk-agent-socket.unit - dest: /lib/systemd/system/check_mk.socket - owner: root - mode: '0644' - -- name: Add checkmk service file - copy: - src: ../files/check-mk-service.unit - 
dest: /lib/systemd/system/check_mk@.service - owner: root - mode: '0644' diff --git a/ansible/roles/proxmox/files/sysctl/zentralwerk.conf b/ansible/roles/proxmox/files/sysctl/zentralwerk.conf deleted file mode 100644 index 5fc48497..00000000 --- a/ansible/roles/proxmox/files/sysctl/zentralwerk.conf +++ /dev/null @@ -1,2 +0,0 @@ - # for elastic - vm.max_map_count=262144 \ No newline at end of file diff --git a/ansible/roles/proxmox/files/sysfs/fix_ipv6.conf b/ansible/roles/proxmox/files/sysfs/fix_ipv6.conf deleted file mode 100644 index d8c7bef2..00000000 --- a/ansible/roles/proxmox/files/sysfs/fix_ipv6.conf +++ /dev/null @@ -1,3 +0,0 @@ -# disable multicast snooping otherwise nested ipv6 containers -# will not find gateways and the docker bridge will not get propagated upstream -class/net/vmbr0/bridge/multicast_snooping = 0 diff --git a/ansible/roles/proxmox/tasks/main.yml b/ansible/roles/proxmox/tasks/main.yml deleted file mode 100644 index e7c8a16d..00000000 --- a/ansible/roles/proxmox/tasks/main.yml +++ /dev/null @@ -1,58 +0,0 @@ ---- -- name: Update all packages to the latest version - apt: - upgrade: dist -- name: Install packages - apt: - name: "{{ packages }}" - vars: - packages: - - ethtool - - gddrescue - - git - - htop - - hwinfo - - ifupdown2 - - iotop - - iperf - - ipmitool - - ipmiutil - - iproute2 - - iptraf - - jq - - kpartx - - ntp - - ntpdate - - latencytop - - screen - - tcpdump - - tmux - - zsh - -- name: Adjust sysctl values - copy: - src: ../files/sysctl/zentralwerk.conf - dest: /etc/sysctl.d/zentralwerk.conf - register: sysctl - -- name: Apply sysctl - shell: "systemctl restart systemd-sysctl.service" - when: sysctl.changed - -- name: Cloning oh-my-zsh - git: - repo=https://github.com/robbyrussell/oh-my-zsh - dest=~/.oh-my-zsh - -- name: Link ~/.zshrc - file: - src: /etc/pve/zshrc - dest: ~/.zshrc - state: link - force: yes - -- name: change user shell to zsh - become: yes - user: - name: root - shell: /bin/zsh diff --git a/hq.nixops b/hq.nixops deleted file mode 100644 index 7b63de7d..00000000 --- a/hq.nixops +++ /dev/null @@ -1,118 +0,0 @@ -{ - network.description = "C3D2 HQ"; - - "mucbot" = - { ... }: - { - imports = [ - hosts/containers/mucbot/configuration.nix - ]; - deployment = { - targetHost = "2a00:8180:2c00:282:28db:dff:fe6b:e89a"; - storeKeysOnMachine = true; - }; - }; - - "public-access-proxy" = - { ... }: - { - imports = [ - hosts/containers/public-access-proxy/configuration.nix - ]; - deployment = { - targetHost = "172.20.73.45"; - storeKeysOnMachine = true; - }; - }; - "elastic1" = - { ... }: - { - imports = [ - hosts/containers/elastic/configuration.nix - ]; - deployment = { - targetHost = "2a00:8180:2c00:282:e0d5:d8ff:fe54:586c"; - storeKeysOnMachine = true; - }; - }; - "logging" = - { ... }: - { - imports = [ - hosts/containers/logging/configuration.nix - ]; - deployment = { - targetHost = "2a00:8180:2c00:282:6811:edff:fe40:89c6"; - storeKeysOnMachine = true; - }; - }; - "storage-ng" = - { ... }: - { - imports = [ - hosts/storage-ng/configuration.nix - ]; - deployment = { - targetHost = "2a00:8180:2c00:223::20"; - storeKeysOnMachine = true; - }; - }; - "mongo" = - { ... }: - { - imports = [ - hosts/containers/mongo/configuration.nix - ]; - deployment = { - targetHost = "2a00:8180:2c00:282:5038:2aff:feba:7d3b"; - storeKeysOnMachine = true; - }; - }; - "registry" = - { ... 
}: - { - imports = [ - hosts/containers/registry/configuration.nix - ]; - deployment = { - targetHost = "2a00:8180:2c00:223::34"; - storeKeysOnMachine = true; - }; - }; - "prometheus" = - { ... }: - { - imports = [ - hosts/containers/prometheus/configuration.nix - ]; - deployment = { - targetHost = "2a00:8180:2c00:282:8c46:d6ff:fe43:6afd"; - storeKeysOnMachine = true; - }; - }; - - "spaceapi" = - { ... }: - { - imports = [ - hosts/containers/spaceapi/configuration.nix - ]; - deployment = { - targetHost = "2a00:8180:2c00:282:1457:adff:fe93:62e9"; - storeKeysOnMachine = true; - }; - }; - - # Run with: - # -I nixpkgs=https://nixos.org/channels/nixos-unstable/nixexprs.tar.xz - "dnscache" = { - imports = [ - hosts/containers/dnscache/configuration.nix - ]; - deployment = { - targetHost = "dnscache.serv.zentralwerk.org"; - storeKeysOnMachine = true; - }; - }; - -} diff --git a/kubernetes/cluster.yml b/kubernetes/cluster.yml deleted file mode 100644 index 090ece94..00000000 --- a/kubernetes/cluster.yml +++ /dev/null @@ -1,105 +0,0 @@ -nodes: - - address: k8s-1.hq.c3d2.de - user: ubuntu - role: - - controlplane - - etcd - - address: k8s-2.hq.c3d2.de - user: ubuntu - role: - - controlplane - - etcd - - worker - - address: k8s-3.hq.c3d2.de - user: ubuntu - role: - - controlplane - - etcd - - worker - -ssh_agent_auth: true -cluster_name: chaoscluster -kubernetes_version: v1.13.5-rancher1-2 - -services: - etcd: - - kube-api: - # IP range for any services created on Kubernetes - # This must match the service_cluster_ip_range in kube-controller - service_cluster_ip_range: 10.43.0.0/16 - # Expose a different port range for NodePort services - service_node_port_range: 30000-32767 - pod_security_policy: false - # Add additional arguments to the kubernetes API server - # This WILL OVERRIDE any existing defaults - #extra_args: - # # Enable audit log to stdout - # audit-log-path: "-" - # Note for Rancher 2 users: If you are configuring Cluster Options using a Config File when creating Rancher Launched Kubernetes, the names of services should contain underscores only: `kube_controller`. This only applies to Rancher v2.0.5 and v2.0.6. - kube-controller: - # CIDR pool used to assign IP addresses to pods in the cluster - cluster_cidr: 10.42.0.0/16 - # IP range for any services created on Kubernetes - # This must match the service_cluster_ip_range in kube-api - service_cluster_ip_range: 10.43.0.0/16 - kubelet: - # Base domain for the cluster - cluster_domain: k8s.hq.c3d2.de - # IP address for the DNS service endpoint - cluster_dns_server: 10.43.0.10 - # Fail if swap is on - fail_swap_on: false - # Set max pods to 250 instead of default 110 - extra_args: - max-pods: 250 - # Optionally define additional volume binds to a service - #extra_binds: - # - "/usr/libexec/kubernetes/kubelet-plugins:/usr/libexec/kubernetes/kubelet-plugins" - -# Currently, only authentication strategy supported is x509. -# You can optionally create additional SANs (hostnames or IPs) to add to -# the API server PKI certificate. -# This is useful if you want to use a load balancer for the control plane servers. -authentication: - strategy: x509 - sans: - - "k8s.hq.c3d2.de" - - "172.22.99.13" - - "172.22.99.15" - - "172.22.99.16" - -# Kubernetes Authorization mode -# Use `mode: rbac` to enable RBAC -# Use `mode: none` to disable authorization -authorization: - mode: none - -# Add-ons are deployed using kubernetes jobs. RKE will give up on trying to get the job status after this timeout in seconds.. 
-addon_job_timeout: 30 - -# Currently only nginx ingress provider is supported. -# To disable ingress controller, set `provider: none` - -ingress: - provider: nginx - -# All add-on manifests MUST specify a namespace -addons: |- - --- - apiVersion: v1 - kind: Pod - metadata: - name: my-nginx - namespace: default - spec: - containers: - - name: my-nginx - image: nginx - ports: - - containerPort: 80 - -# addons_include: -# - https://raw.githubusercontent.com/rook/rook/master/cluster/examples/kubernetes/rook-operator.yaml -# - https://raw.githubusercontent.com/rook/rook/master/cluster/examples/kubernetes/rook-cluster.yaml -# - /path/to/manifest
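
For reference, the files removed in this change were the entry points of three separate toolchains (Ansible, RKE, NixOps). The following is only a minimal sketch of how such a setup is typically driven from the repository root; the deployment name and exact invocation are assumptions, not taken from this change:

# Ansible: the hosts inventory plus the two playbooks (run from the ansible/ directory)
ansible-playbook -i hosts hypervisor.yml
ansible-playbook -i hosts kubernetes.yml

# RKE: bring up the cluster described in kubernetes/cluster.yml
rke up --config kubernetes/cluster.yml

# NixOps: container/host definitions in hq.nixops (deployment name "hq" is illustrative)
nixops create ./hq.nixops -d hq
nixops deploy -d hq -I nixpkgs=https://nixos.org/channels/nixos-unstable/nixexprs.tar.xz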