Skip to content
Snippets Groups Projects
Commit 530ddad1 authored by Zitnik, Anze's avatar Zitnik, Anze
Browse files

Merge branch 'updated-error-handling' into 'master'

Updated error handling and tests

See merge request medina/evidence-collector!10
parents a7951bf4 037167ff
No related branches found
No related tags found
No related merge requests found
...@@ -24,11 +24,14 @@ test: ...@@ -24,11 +24,14 @@ test:
stage: test stage: test
script: script:
- apk add bash - apk add bash
- docker stop $SERVICE || true && docker rm $SERVICE || true - docker network create test-ec
- docker run --env-file .env --name $SERVICE -d $REGISTRY/medina/$SERVICE:$VERSION - docker run --rm --network=test-ec --env-file .env --name $SERVICE -d $REGISTRY/medina/$SERVICE:$VERSION
- sleep 5 - docker run --rm --network=test-ec toschneck/wait-for-it $SERVICE:7890 -t 240
- bash test/test.sh - bash test/test.sh
- docker stop $SERVICE && docker container rm $SERVICE after_script:
- SERVICE=$(grep SERVICE MANIFEST | cut -d '=' -f2)
- docker kill $SERVICE || docker network rm test-ec
- docker network rm test-ec
push: push:
stage: push stage: push
......
...@@ -9,6 +9,6 @@ RUN pip3 install -r requirements.txt ...@@ -9,6 +9,6 @@ RUN pip3 install -r requirements.txt
COPY . . COPY . .
RUN apt-get update && apt-get install -y redis-server RUN apt-get update && apt-get install -y redis-server netcat
ENTRYPOINT ["./entrypoint.sh"] ENTRYPOINT ["./entrypoint.sh"]
VERSION=v0.0.11 VERSION=v0.0.12
SERVICE=evidence-collector SERVICE=evidence-collector
...@@ -8,4 +8,8 @@ rqscheduler --host $redis_host --port $redis_port & ...@@ -8,4 +8,8 @@ rqscheduler --host $redis_host --port $redis_port &
python3 -m scheduler.scheduler python3 -m scheduler.scheduler
# open a listener on port 7890 for 1 second
# only for testing - CI script contains a wait-for-it that binds to this port
nc -l -p 7890 -w 1
tail -f /var/log/evidence_collector.log tail -f /var/log/evidence_collector.log
...@@ -30,6 +30,8 @@ class ClouditorAuthentication(object): ...@@ -30,6 +30,8 @@ class ClouditorAuthentication(object):
urllib3.exceptions.MaxRetryError, requests.exceptions.ConnectionError) as err: urllib3.exceptions.MaxRetryError, requests.exceptions.ConnectionError) as err:
self.logger.error(err) self.logger.error(err)
self.logger.error("Clouditor OAuth2 token endpoint not available") self.logger.error("Clouditor OAuth2 token endpoint not available")
self.__access_token = None
self.__token_expiration_time = None
else: else:
token = json.loads(access_token_response.text) token = json.loads(access_token_response.text)
...@@ -41,7 +43,7 @@ class ClouditorAuthentication(object): ...@@ -41,7 +43,7 @@ class ClouditorAuthentication(object):
def get_token(self): def get_token(self):
# In practice this condition isn't even needed as every scheduled job creates new ClouditorAuthentication object and acquires new token. # In practice this condition isn't even needed as every scheduled job creates new ClouditorAuthentication object and acquires new token.
if (datetime.utcnow() > self.__token_expiration_time): if (self.__token_expiration_time != None and datetime.utcnow() > self.__token_expiration_time):
self.logger.debug("OAuth2 token expired.") self.logger.debug("OAuth2 token expired.")
self.request_token() self.request_token()
......
...@@ -14,7 +14,10 @@ class ForwardEvidence(object): ...@@ -14,7 +14,10 @@ class ForwardEvidence(object):
def send_evidence(self, assessevidencerequest, token): def send_evidence(self, assessevidencerequest, token):
try: try:
if token is not None:
metadata = [('authorization', 'Bearer ' + token)] metadata = [('authorization', 'Bearer ' + token)]
else:
metadata = None
response = self.stub.AssessEvidence(assessevidencerequest, metadata=metadata) response = self.stub.AssessEvidence(assessevidencerequest, metadata=metadata)
self.logger.info('gRPC evidence forwarded: ' + str(response)) self.logger.info('gRPC evidence forwarded: ' + str(response))
......
...@@ -6,6 +6,7 @@ redis2="Ready to accept connections" ...@@ -6,6 +6,7 @@ redis2="Ready to accept connections"
scheduler="Registering birth" scheduler="Registering birth"
worker1="Worker rq:worker:" worker1="Worker rq:worker:"
worker2="Listening on " worker2="Listening on "
oauth2token="Clouditor OAuth2 token endpoint not available"
if ! [[ $logs =~ $redis1 ]] if ! [[ $logs =~ $redis1 ]]
then then
...@@ -36,3 +37,9 @@ if ! [[ $logs =~ $worker2 ]] ...@@ -36,3 +37,9 @@ if ! [[ $logs =~ $worker2 ]]
echo "Redis worker not started" 1>&2 echo "Redis worker not started" 1>&2
exit 1 exit 1
fi fi
if ! [[ $logs =~ $oauth2token ]]
then
echo "OAuth2 token authentication not working" 1>&2
exit 1
fi
from wazuh_evidence_collector.wazuh_client import WazuhClient from wazuh_evidence_collector.wazuh_client import WazuhClient
from elasticsearch import Elasticsearch import elasticsearch
import urllib3
from elasticsearch_dsl import Search from elasticsearch_dsl import Search
class Checker: class Checker:
def __init__(self, wc, es): def __init__(self, wc, es, logger):
self.wc = wc self.wc = wc
self.es = es self.es = es
self.logger = logger
# Check if syscheck enabled # Check if syscheck enabled
def check_syscheck(self, agent): def check_syscheck(self, agent):
...@@ -92,7 +94,14 @@ class Checker: ...@@ -92,7 +94,14 @@ class Checker:
.query("match", rule__description="Clamd restarted") \ .query("match", rule__description="Clamd restarted") \
.query("match", agent__id=agent[0]) .query("match", agent__id=agent[0])
try:
body = s.execute().to_dict() body = s.execute().to_dict()
except (elasticsearch.exceptions.ConnectionError, TimeoutError, urllib3.exceptions.NewConnectionError,
urllib3.exceptions.MaxRetryError) as err:
self.logger.error(err)
self.logger.error("Elasticsearch not available")
return None, False
measurement_result = len(body['hits']['hits']) > 0 measurement_result = len(body['hits']['hits']) > 0
......
...@@ -3,12 +3,13 @@ import urllib3 ...@@ -3,12 +3,13 @@ import urllib3
class WazuhClient: class WazuhClient:
def __init__(self, ip, port, username, password): def __init__(self, ip, port, username, password, logger):
self._ip = ip self._ip = ip
self._port = port self._port = port
self._username = username self._username = username
self._password = password self._password = password
self._auth_token = None self._auth_token = None
self.logger = logger
def req(self, method, resource, data=None, headers={}, auth_retry=True): def req(self, method, resource, data=None, headers={}, auth_retry=True):
# TODO: add cert verification # TODO: add cert verification
...@@ -19,7 +20,13 @@ class WazuhClient: ...@@ -19,7 +20,13 @@ class WazuhClient:
if self._auth_token: if self._auth_token:
headers['Authorization'] = 'Bearer %s' % self._auth_token headers['Authorization'] = 'Bearer %s' % self._auth_token
try:
resp = c.request(method, url, headers=headers, body=data) resp = c.request(method, url, headers=headers, body=data)
except (TimeoutError, urllib3.exceptions.NewConnectionError,
urllib3.exceptions.MaxRetryError) as err:
self.logger.error(err)
self.logger.error("Wazuh manager not available")
if resp.status == 401: if resp.status == 401:
if not auth_retry: if not auth_retry:
raise Exception("Authentication Error") raise Exception("Authentication Error")
......
...@@ -27,7 +27,7 @@ ELASTIC_USERNAME = os.environ.get("elastic_username") ...@@ -27,7 +27,7 @@ ELASTIC_USERNAME = os.environ.get("elastic_username")
ELASTIC_PASSWORD = os.environ.get("elastic_password") ELASTIC_PASSWORD = os.environ.get("elastic_password")
if not DEMO: if not DEMO:
wc = WazuhClient(WAZUH_HOST, WAZUH_PORT, WAZUH_USERNAME, WAZUH_PASSWORD) wc = WazuhClient(WAZUH_HOST, WAZUH_PORT, WAZUH_USERNAME, WAZUH_PASSWORD, LOGGER)
es = Elasticsearch( es = Elasticsearch(
ELASTIC_HOST, ELASTIC_HOST,
...@@ -69,7 +69,7 @@ def main(): ...@@ -69,7 +69,7 @@ def main():
# Wrapper function that runs all the checks (for every manager/agent) # Wrapper function that runs all the checks (for every manager/agent)
def run_collector(): def run_collector():
checker = Checker(wc, es) if not DEMO else DemoChecker() checker = Checker(wc, es, LOGGER) if not DEMO else DemoChecker()
# Get list of all agent ids (including manager's) # Get list of all agent ids (including manager's)
def get_agents(wc): def get_agents(wc):
......
Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment