Merge pull request 'create json log, clean logging up' (#25) from oglog into main
Reviewed-on: #25
commit
52297f2815
|
@ -1,3 +1,9 @@
|
|||
ogagent (3.3.0-1) stable; urgency=medium
|
||||
|
||||
* Create an additional json log file
|
||||
|
||||
-- OpenGnsys developers <info@opengnsys.es> Mon, 14 Apr 2025 13:50:32 +0200
|
||||
|
||||
ogagent (3.2.0-1) stable; urgency=medium
|
||||
|
||||
* Operating system: periodically ping ogcore
|
||||
|
|
|
@ -1 +1 @@
|
|||
3.2.0
|
||||
3.3.0
|
||||
|
|
|
@ -58,8 +58,12 @@ class ConnectionError(RESTError):
|
|||
# Disable warnings log messages
|
||||
try:
|
||||
import urllib3 # @UnusedImport
|
||||
requests_log = logging.getLogger ('urllib3')
|
||||
requests_log.setLevel (logging.INFO)
|
||||
except Exception:
|
||||
from requests.packages import urllib3 # @Reimport
|
||||
requests_log = logging.getLogger ('requests.packages.urllib3')
|
||||
requests_log.setLevel (logging.INFO)
|
||||
|
||||
try:
|
||||
urllib3.disable_warnings() # @UndefinedVariable
|
||||
|
@ -148,7 +152,9 @@ class REST(object):
|
|||
raise Exception (f'response content-type is not "application/json" but "{ct}"')
|
||||
r = json.loads(r.content) # Using instead of r.json() to make compatible with old requests lib versions
|
||||
except requests.exceptions.RequestException as e:
|
||||
raise ConnectionError(e)
|
||||
code = e.response.status_code
|
||||
logger.warning (f'request failed, HTTP code "{code}"')
|
||||
return None
|
||||
except Exception as e:
|
||||
raise ConnectionError(exceptionToMessage(e))
|
||||
|
||||
|
@ -160,13 +166,13 @@ class REST(object):
|
|||
@param data: if None or omitted, message will be a GET, else it will send a POST
|
||||
@param processData: if True, data will be serialized to json before sending, else, data will be sent as "raw"
|
||||
"""
|
||||
logger.debug('Invoking post message {} with data {}'.format(msg, data))
|
||||
#logger.debug('Invoking post message {} with data {}'.format(msg, data))
|
||||
|
||||
if processData and data is not None:
|
||||
data = json.dumps(data)
|
||||
|
||||
url = self._getUrl(msg)
|
||||
logger.debug('Requesting {}'.format(url))
|
||||
#logger.debug('Requesting {}'.format(url))
|
||||
|
||||
try:
|
||||
res = self._request(url, data)
|
||||
|
|
|
@ -34,6 +34,8 @@ import logging
|
|||
import os
|
||||
import tempfile
|
||||
|
||||
from ..log_format import JsonFormatter
|
||||
|
||||
# Logging levels
|
||||
OTHER, DEBUG, INFO, WARN, ERROR, FATAL = (10000 * (x + 1) for x in range(6))
|
||||
|
||||
|
@ -48,15 +50,25 @@ class LocalLogger(object):
|
|||
|
||||
for logDir in ('/var/log', os.path.expanduser('~'), tempfile.gettempdir()):
|
||||
try:
|
||||
fname = os.path.join(logDir, 'opengnsys.log')
|
||||
logging.basicConfig(
|
||||
filename=fname,
|
||||
filemode='a',
|
||||
format='%(levelname)s %(asctime)s (%(threadName)s) (%(funcName)s) %(message)s',
|
||||
level=logging.DEBUG
|
||||
)
|
||||
self.logger = logging.getLogger('opengnsys')
|
||||
os.chmod(fname, 0o0600)
|
||||
fname1 = os.path.join (logDir, 'opengnsys.log')
|
||||
fmt1 = logging.Formatter (fmt='%(levelname)s %(asctime)s (%(threadName)s) (%(funcName)s) %(message)s')
|
||||
fh1 = logging.FileHandler (filename=fname1, mode='a')
|
||||
fh1.setFormatter (fmt1)
|
||||
fh1.setLevel (logging.DEBUG)
|
||||
|
||||
fname2 = os.path.join (logDir, 'opengnsys.json.log')
|
||||
fmt2 = JsonFormatter ({"timestamp": "asctime", "severity": "levelname", "threadName": "threadName", "function": "funcName", "message": "message"}, time_format='%Y-%m-%d %H:%M:%S', msec_format='')
|
||||
fh2 = logging.FileHandler (filename=fname2, mode='a')
|
||||
fh2.setFormatter (fmt2)
|
||||
fh2.setLevel (logging.DEBUG)
|
||||
|
||||
self.logger = logging.getLogger ('opengnsys')
|
||||
self.logger.setLevel (logging.DEBUG)
|
||||
self.logger.addHandler (fh1)
|
||||
self.logger.addHandler (fh2)
|
||||
|
||||
os.chmod (fname1, 0o0600)
|
||||
os.chmod (fname2, 0o0600)
|
||||
return
|
||||
except Exception:
|
||||
pass
|
||||
|
|
|
import json
import logging


class JsonFormatter(logging.Formatter):
    """
    Formatter that outputs JSON strings after parsing the LogRecord.

    @param dict fmt_dict: Key: logging format attribute pairs. Defaults to {"message": "message"}.
    @param str time_format: time.strftime() format string. Default: "%Y-%m-%dT%H:%M:%S"
    @param str msec_format: Microsecond formatting. Appended at the end. Default: "%s.%03dZ"
    """
    def __init__(self, fmt_dict: dict = None, time_format: str = "%Y-%m-%dT%H:%M:%S", msec_format: str = "%s.%03dZ"):
        # Fix: initialize the base Formatter so inherited helpers
        # (formatTime, formatException, formatStack) see a fully set-up instance.
        super().__init__()
        # Mapping of output JSON key -> LogRecord attribute name.
        self.fmt_dict = fmt_dict if fmt_dict is not None else {"message": "message"}
        self.default_time_format = time_format
        # An empty msec_format suppresses the millisecond suffix entirely,
        # because Formatter.formatTime only appends it when the format is truthy.
        self.default_msec_format = msec_format
        self.datefmt = None

    def usesTime(self) -> bool:
        """
        Overwritten to look for the attribute in the format dict values instead of the fmt string.
        """
        return "asctime" in self.fmt_dict.values()

    def formatMessage(self, record) -> dict:
        """
        Overwritten to return a dictionary of the relevant LogRecord attributes instead of a string.
        KeyError is raised if an unknown attribute is provided in the fmt_dict.
        """
        return {fmt_key: record.__dict__[fmt_val] for fmt_key, fmt_val in self.fmt_dict.items()}

    def format(self, record) -> str:
        """
        Mostly the same as the parent's class method, the difference being that a dict is manipulated and dumped as JSON
        instead of a string.
        """
        # Render the final message string (applies %-style args) before lookup.
        record.message = record.getMessage()

        if self.usesTime():
            record.asctime = self.formatTime(record, self.datefmt)

        message_dict = self.formatMessage(record)

        if record.exc_info:
            # Cache the traceback text to avoid converting it multiple times
            # (it's constant anyway)
            if not record.exc_text:
                record.exc_text = self.formatException(record.exc_info)

        if record.exc_text:
            message_dict["exc_info"] = record.exc_text

        if record.stack_info:
            message_dict["stack_info"] = self.formatStack(record.stack_info)

        # default=str keeps non-serializable attribute values from raising.
        return json.dumps(message_dict, default=str)
|
|
@ -36,6 +36,8 @@ import logging
|
|||
import os
|
||||
import tempfile
|
||||
|
||||
from ..log_format import JsonFormatter
|
||||
|
||||
# Valid logging levels, from UDS Broker (uds.core.utils.log)
|
||||
OTHER, DEBUG, INFO, WARN, ERROR, FATAL = (10000 * (x + 1) for x in range(6))
|
||||
|
||||
|
@ -44,13 +46,24 @@ class LocalLogger(object):
|
|||
def __init__(self):
|
||||
# tempdir is different for "user application" and "service"
|
||||
# service wil get c:\windows\temp, while user will get c:\users\XXX\appdata\local\temp
|
||||
logging.basicConfig(
|
||||
filename=os.path.join(tempfile.gettempdir(), 'opengnsys.log'),
|
||||
filemode='a',
|
||||
format='%(levelname)s %(asctime)s (%(threadName)s) (%(funcName)s) %(message)s',
|
||||
level=logging.DEBUG
|
||||
)
|
||||
|
||||
fname1 = os.path.join (tempfile.gettempdir(), 'opengnsys.log')
|
||||
fmt1 = logging.Formatter (fmt='%(levelname)s %(asctime)s (%(threadName)s) (%(funcName)s) %(message)s')
|
||||
fh1 = logging.FileHandler (filename=fname1, mode='a')
|
||||
fh1.setFormatter (fmt1)
|
||||
fh1.setLevel (logging.DEBUG)
|
||||
|
||||
fname2 = os.path.join (tempfile.gettempdir(), 'opengnsys.json.log')
|
||||
fmt2 = JsonFormatter ({"timestamp": "asctime", "severity": "levelname", "threadName": "threadName", "function": "funcName", "message": "message"}, time_format='%Y-%m-%d %H:%M:%S', msec_format='')
|
||||
fh2 = logging.FileHandler (filename=fname2, mode='a')
|
||||
fh2.setFormatter (fmt2)
|
||||
fh2.setLevel (logging.DEBUG)
|
||||
|
||||
self.logger = logging.getLogger('opengnsys')
|
||||
self.logger.setLevel (logging.DEBUG)
|
||||
self.logger.addHandler (fh1)
|
||||
self.logger.addHandler (fh2)
|
||||
|
||||
self.serviceLogger = False
|
||||
|
||||
def log(self, level, message):
|
||||
|
|
|
@ -198,7 +198,7 @@ class ogLiveWorker(ServerWorker):
|
|||
progress = None
|
||||
for i in ary:
|
||||
if m := re.search (r'^\[([0-9]+)\]', i): ## look for strings like '[10]', '[60]'
|
||||
logger.debug (f"matched regex, m.groups ({m.groups()})")
|
||||
#logger.debug (f"matched regex, m.groups ({m.groups()})")
|
||||
progress = float (m.groups()[0]) / 100
|
||||
return progress
|
||||
|
||||
|
@ -211,7 +211,7 @@ class ogLiveWorker(ServerWorker):
|
|||
for k in self.thread_list:
|
||||
elem = self.thread_list[k]
|
||||
if 'thread' not in elem: continue
|
||||
logger.debug (f'considering thread ({k})')
|
||||
#logger.debug (f'considering thread ({k})')
|
||||
|
||||
if self.pid_q:
|
||||
if not self.pid_q.empty():
|
||||
|
@ -240,11 +240,20 @@ class ogLiveWorker(ServerWorker):
|
|||
time.sleep (1)
|
||||
n += 1
|
||||
if not n % 10:
|
||||
alive_threads = []
|
||||
for k in self.thread_list:
|
||||
elem = self.thread_list[k]
|
||||
if 'thread' not in elem: continue
|
||||
alive_threads.append (k)
|
||||
if alive_threads:
|
||||
s = ','.join (alive_threads)
|
||||
logger.debug (f'alive threads: {s}')
|
||||
|
||||
body = {
|
||||
'iph': self.IPlocal,
|
||||
'timestamp': int (time.time()),
|
||||
}
|
||||
logger.debug (f'about to send ping ({body})')
|
||||
#logger.debug (f'about to send ping ({body})')
|
||||
self.REST.sendMessage ('clients/status/webhook', body)
|
||||
|
||||
def interfaceAdmin (self, method, parametros=[]):
|
||||
|
@ -277,7 +286,8 @@ class ogLiveWorker(ServerWorker):
|
|||
self.pid_q.put (p.pid)
|
||||
else:
|
||||
## esto sucede por ejemplo cuando arranca el agente, que estamos en interfaceAdmin() en el mismo hilo, sin _long_running_job ni hilo separado
|
||||
logger.debug ('no queue--not writing any PID to it')
|
||||
#logger.debug ('no queue--not writing any PID to it')
|
||||
pass
|
||||
|
||||
sout = serr = ''
|
||||
while p.poll() is None:
|
||||
|
@ -293,12 +303,12 @@ class ogLiveWorker(ServerWorker):
|
|||
serr = serr.strip()
|
||||
|
||||
## DEBUG
|
||||
logger.info (f'stdout follows:')
|
||||
logger.debug (f'stdout follows:')
|
||||
for l in sout.splitlines():
|
||||
logger.info (f' {l}')
|
||||
logger.info (f'stderr follows:')
|
||||
for l in serr.splitlines():
|
||||
logger.info (f' {l}')
|
||||
logger.debug (f' {l}')
|
||||
#logger.debug (f'stderr follows:')
|
||||
#for l in serr.splitlines():
|
||||
# logger.debug (f' {l}')
|
||||
## /DEBUG
|
||||
if 0 != p.returncode:
|
||||
cmd_txt = ' '.join (proc)
|
||||
|
@ -337,9 +347,7 @@ class ogLiveWorker(ServerWorker):
|
|||
res = self.REST.sendMessage ('/'.join ([self.name, path]), obj)
|
||||
|
||||
if (type (res) is not dict):
|
||||
#logger.error ('No se ha podido establecer conexión con el Servidor de Administración') ## Error de conexión con el servidor
|
||||
logger.debug (f'res ({res})')
|
||||
logger.error ('Error al enviar trama ***send() fallo')
|
||||
logger.error (f'response is not a dict ({res})')
|
||||
return False
|
||||
|
||||
return res
|
||||
|
@ -366,9 +374,7 @@ class ogLiveWorker(ServerWorker):
|
|||
p = subprocess.Popen (['/usr/bin/browser', '-qws', url])
|
||||
try:
|
||||
p.wait (2) ## if the process dies before 2 seconds...
|
||||
logger.error ('Error al ejecutar la llamada a la interface de administración')
|
||||
logger.error ('Error en la creación del proceso hijo')
|
||||
logger.error ('return code "{}"'.format (p.returncode))
|
||||
logger.error ('Error al ejecutar browser, return code "{}"'.format (p.returncode))
|
||||
return False
|
||||
except subprocess.TimeoutExpired:
|
||||
pass
|
||||
|
@ -388,7 +394,7 @@ class ogLiveWorker(ServerWorker):
|
|||
logger.error (e)
|
||||
logger.error ('No se ha podido recuperar la dirección IP del cliente')
|
||||
return None
|
||||
logger.debug ('parametroscfg ({})'.format (parametroscfg))
|
||||
#logger.debug ('parametroscfg ({})'.format (parametroscfg))
|
||||
return parametroscfg
|
||||
|
||||
def cfg2obj (self, cfg):
|
||||
|
|
Loading…
Reference in New Issue