#!/usr/bin/env python3 # -*- coding: utf-8 -*- ############################################################################## # Mythimport V3 # # Import MythTV recordings exported by the On-The-Go export option of # Mythexport V3. # # Author: J S Worthington # # This version of mythimport is a complete re-write in Python. It is # compatible with MythTV 0.28+ and mythexport-daemon V3 only. It uses # individual *.sql files, one per exported recording, so that failure to # import one recording does not affect the importing of other recordings. Due # to the new 'recordedid' index field in the MythTV 0.28 'recorded' table, it # is no longer possible to use a simple mysql command line command to import # SQL for a recording, so the SQL is read and modified by the Python code # before being executed. # ############################################################################## ############################################################################## # Version 3.0.7 2019-07-02 # When the MySQL version mysqldump is run more than once, it will produce # lines starting with "mysqldump: " each time it is run. So when such # output files are concatenated, mythimport needs to possibly cope with # these messages between each table in its input file. # Version 3.0.8 2019-07-05 # Move associated *png files if present, as well as the recording file. # Version 3.1.0 2020-09-17 # Convert to Python 3 and make compatible with MythTV v31. # Version 3.1.1 2023-02-10 # Fix for mysqldump putting a messages like this: # WARNING: Forcing protocol to TCP due to option specification. Please explicitly state intended protocol. # at the start of a .sql file. 
##############################################################################

import argparse
import glob
import grp
import logging
import logging.handlers
# Fix: import the submodule explicitly.  A plain 'import lxml' does not load
# 'lxml.etree'; the later lxml.etree.parse() calls only worked because the
# MythTV bindings happened to import lxml.etree first.
import lxml.etree
import math
import MythTV
import os
import pwd
import shutil
import socket
import stat
import sys

# sqlparse is a third-party package; fail early with installation help if it
# is missing, since the SQL splitting below cannot work without it.
try:
    import sqlparse
except ImportError:
    print('Please install sqlparse.')
    print('In Ubuntu:')
    print(' sudo apt install python3-sqlparse')
    print('If it is not available as a package, try "pip3 install sqlparse" or "pip install sqlparse".')
    exit(2)

##############################################################################
# Configuration
##############################################################################

PROGRAM_NAME = 'mythimport'

VERSION = '3.1.1 2023-02-10 Python 3'

# Default logging level. Log level used for output to the log file.
LOGGING_LEVEL = 'debug'
#LOGGING_LEVEL = 'info'

# Default console logging level. Logging level used for output to the console.
CONSOLE_LEVEL = 'info'

# Default maximum size of a log file before it gets rotated. Units: bytes.
MAXLOGFILESIZE = 100*1024*1024

# Default maximum number of rotating log files to be kept.
MAXLOGFILES = 9

# Directory to find the local MythTV config.xml in.
LOCAL_CONFDIR = '/etc/mythtv'

# Stop moving files to storagegroup recording partitions when free space would
# become less than this.
#
# It is vital that this number exceeds the limit at which MythTV will expire
# recordings.  Otherwise, the regular checks for free space *will* expire
# enough recordings so that whatever size of free space you have specified
# in your MythTV settings is available.  And then a later import operation
# will put more recordings on the drive, which will then cause more recordings
# to be expired.  Units: bytes.  Default: 20 gibibytes.
# 20 gibibytes
MIN_STG_FREE_SPACE = 20*1024*1024*1024

##############################################################################
# Constants
##############################################################################

FAILED_EXT = '.failed'
GLOB_SQL_EXT = '*.sql'
IMPORTED_EXT = '.imported'
IMPORTING_EXT = '.importing'

##############################################################################
# Get the hostname.
# Modified from:
# http://stackoverflow.com/questions/4271740/how-can-i-use-python-to-get-the-system-hostname
##############################################################################

def _get_hostname():
    """Return this machine's hostname, preferring a useful qualified name."""
    short_name = socket.gethostname()
    resolved = socket.gethostbyaddr(short_name)[0]
    # Keep the plain hostname when it is already qualified, or when the
    # reverse lookup gives nothing better than 'localhost'.
    if '.' in short_name or resolved == 'localhost':
        return short_name
    return resolved

##############################################################################
# Find the mountpoint from a given path.
# Copied from:
# http://stackoverflow.com/questions/4453602/how-to-find-the-mountpoint-a-file-resides-on
##############################################################################

def _find_mount_point(path):
    """Walk up the directory tree from path until a mount point is reached."""
    current = os.path.abspath(path)
    while not os.path.ismount(current):
        current = os.path.dirname(current)
    return current

##############################################################################
# Convert a loglevel string to a loglevel value.
#
# No error checking is done as the 'level' value is assumed to have been
# created using argparse from a valid list of levels.
##############################################################################

def _str_to_loglevel(level):
    """Map a level name (pre-validated by argparse) to a logging constant."""
    return dict(
        none=logging.NOTSET,
        debug=logging.DEBUG,
        info=logging.INFO,
        warning=logging.WARNING,
        error=logging.ERROR,
        critical=logging.CRITICAL,
    )[level]

##############################################################################
# Python logging handler for output via the MythTV MythLog Python bindings.
##############################################################################

class MythLogHandler(logging.Handler):
    """Python logging handler that forwards records to MythTV's MythLog."""

    def __init__(self, logfile):
        super(MythLogHandler, self).__init__()
        MythTV.MythLog._setfile(logfile)
        MythTV.MythLog._setlevel('important,general')
        self.log = MythTV.MythLog(module='')

    def emit(self, record):
        # Every record is forwarded at GENERAL/INFO; level filtering has
        # already been done on the Python logging side.
        self.log(MythTV.MythLog.GENERAL, MythTV.MythLog.INFO, self.format(record))

##############################################################################
# Round up to the next highest multiple.
##############################################################################

def round_up(num, divisor):
    """Return num rounded up to the next multiple of divisor.

    An exact multiple is returned unchanged.  Fix: the previous
    '(int(math.ceil((num - 1) / divisor)) + 1) * divisor' was a leftover from
    the Python 2 conversion, where '/' was integer division; under Python 3 it
    over-rounded (e.g. round_up(8, 4) gave 12, round_up(10, 4) gave 16).
    Plain integer ceiling-division restores the intended result.
    """
    return ((num + divisor - 1) // divisor) * divisor

##############################################################################
# Get the free space on a filesystem.
##############################################################################

def get_fs_freespace(pathname):
    """Get the free space of the filesystem containing pathname."""
    # Renamed the local from 'stat' to avoid shadowing the stat module.
    vfs = os.statvfs(pathname)
    # Use f_bfree for superuser, or f_bavail if filesystem has reserved space
    # for superuser.
    return vfs.f_bavail*vfs.f_bsize

##############################################################################
# Determine if two paths are on the same partition.
##############################################################################

def same_partition(f1, f2):
    """Return True when f1 and f2 live on the same device (partition)."""
    return os.stat(f1).st_dev == os.stat(f2).st_dev

##############################################################################
# Extract fields required for database access from a MythTV config.xml file.
# Modified from /usr/lib/python2.7/dist-packages/MythTV/database.py
##############################################################################

class _ConfigXml:
    """Database credentials extracted from a MythTV config.xml file."""

    # Maps modern config.xml element names to attribute names on this object.
    _conf_trans = {'PingHost':'pinghost',
                   'Host':'hostname',
                   'UserName':'username',
                   'Password':'password',
                   'DatabaseName':'database',
                   'Port':'port'}

    def read_xml(self, confdir):
        """Read a modern-format config.xml from confdir.

        Returns True on success, False if the file is unreadable or does not
        parse as the modern format.
        """
        filename = os.path.join(confdir, 'config.xml')
        if not os.access(filename, os.R_OK):
            return False
        try:
            config = lxml.etree.parse(filename)
            name = config.xpath('/Configuration/LocalHostName/text()')
            if len(name):
                self.profile = name[0]
            for child in config.xpath('/Configuration/Database')[0].getchildren():
                if child.tag in self._conf_trans:
                    setattr(self, self._conf_trans[child.tag], child.text)
        except Exception:
            return False
        return True

    def read_old_xml(self, confdir):
        """Read an old-format (UPnP-style) config.xml from confdir.

        Returns True on success, False otherwise.
        """
        filename = os.path.join(confdir, 'config.xml')
        if not os.access(filename, os.R_OK):
            return False
        try:
            config = lxml.etree.parse(filename)
            trans = {'DBHostName':'hostname',
                     'DBUserName':'username',
                     'DBPassword':'password',
                     'DBName':'database',
                     'DBPort':'port'}
            for child in config.xpath('/Configuration/UPnP/MythFrontend/'
                                      'DefaultBackend')[0].getchildren():
                if child.tag in trans:
                    setattr(self, trans[child.tag], child.text)
        except Exception:
            # Fix: a stray debug 'raise' here (followed by an unreachable
            # 'return False') propagated any parse failure instead of letting
            # __init__ log a clean error and exit.
            return False
        return True

    def __init__(self, confdir):
        # Defaults, overridden by whichever config.xml flavour parses.
        # (An unused duplicate of _conf_trans and a no-op 'confdir = confdir'
        # assignment were removed from the original here.)
        self.pinghost = MythTV.QuickProperty('_pinghost', False, bool)
        self.port = MythTV.QuickProperty('_port', 3306, int)
        self.pin = MythTV.QuickProperty('_pin', 0000, int)
        self.hostname = MythTV.QuickProperty('_hostname', '127.0.0.1')
        self.username = MythTV.QuickProperty('_username', 'mythtv')
        self.password = MythTV.QuickProperty('_password', 'mythtv')
        self.database = MythTV.QuickProperty('_database', 'mythconverg')
        if not self.read_xml(confdir) and not self.read_old_xml(confdir):
            logger.critical('Failed to read database credentials from: {0}'.format(os.path.join(confdir, 'config.xml')))
            exit(3)

##############################################################################
# Filter out non-SQL lines in the SQL input.
##############################################################################

def _non_sql_line_filter(sql):
    """Strip mysqldump chatter and WARNING lines from sql, logging each one."""
    filtered_sql = ''
    for line in sql.splitlines(True):
        if line[0:11] == 'mysqldump: ':
            logger.warning('mysqldump message found: ' + line)
        elif line[0:8] == 'WARNING:':
            logger.warning('Warning message found: ' + line)
        else:
            filtered_sql += line
    return filtered_sql

##############################################################################
# Generate modified SQL from imported SQL.
##############################################################################

def _get_wanted_columns(dbc, table_name):
    """Return table_name's column names as a comma-separated string, omitting
    the auto-increment 'recordedid' column (a new id must be allocated on
    import)."""
    columns = []
    dbc.execute('desc ' + table_name + ';')
    for row in dbc:
        if row[0] != 'recordedid':
            columns.append(row[0])
    return ','.join(columns)

def _modified_sql(sql):
    """Yield (action, statement) pairs that import sql via staging tables.

    The dumped 'recorded' and 'recordedfile' statements are redirected into
    mythimport_* staging tables; when the dump moves on to the next table the
    staged rows are re-inserted into the real tables with a freshly allocated
    recordedid.  The action tag tells the caller when a statement's result
    must be fetched ('basename', 'old_recordedid', 'new_recordedid').

    Note: reads the module-global cursor 'dbc' for the column lookup.
    """
    TABLE_START_STRING = '-- Dumping data for table `'
    TABLE_START_STRING_LEN = len(TABLE_START_STRING)
    wanted_recorded_columns = _get_wanted_columns(dbc, 'recorded')
    logger.debug(str(wanted_recorded_columns))
    sql_parts = sqlparse.split(sql)
    yield '', 'set sql_notes=0;'
    yield '', 'drop table if exists mythimport_recorded;'
    yield '', 'drop table if exists mythimport_recordedfile;'
    yield '', 'set sql_notes=1;'
    yield '', 'create table mythimport_recorded like recorded;'
    yield '', 'create table mythimport_recordedfile like recordedfile;'
    line = 0
    table_name = ''
    prev_table_name = ''
    for sql_part in sql_parts:
        line += 1
        if sql_part.strip() != '':
            table_name_index = sql_part.find(TABLE_START_STRING)
            if table_name_index != -1:
                table_name_index += TABLE_START_STRING_LEN
                table_name_end_index = sql_part.find('`', table_name_index)
                if table_name_end_index == -1:
                    raise IndexError('Unterminated table name in mysqldump header')
                prev_table_name = table_name
                table_name = sql_part[table_name_index:table_name_end_index]
                logger.debug('New table name: ' + table_name)
                if table_name == 'recorded':
                    sql_part = sql_part.replace('`recorded`', '`mythimport_recorded`')
                elif table_name == 'recordedfile':
                    sql_part = sql_part.replace('`recordedfile`', '`mythimport_recordedfile`')
                if prev_table_name != table_name:
                    if prev_table_name == 'recorded':
                        # The staged 'recorded' row is complete: remember its
                        # basename and old recordedid, then re-insert it into
                        # the real table without recordedid so MySQL allocates
                        # a new one.
                        yield '', 'set @basename = (select basename from mythimport_recorded limit 1);'
                        yield '', 'set @old_recordedid = (select recordedid from mythimport_recorded limit 1);'
                        yield 'basename', 'select @basename;'
                        yield 'old_recordedid', 'select @old_recordedid;'
                        yield '', 'insert into recorded (%s) (select %s from mythimport_recorded);' % (wanted_recorded_columns, wanted_recorded_columns)
                        yield '', 'set @recordedid = (select recordedid from recorded where basename=@basename limit 1);'
                        yield 'new_recordedid', 'select @recordedid;'
                        yield '', 'delete from mythimport_recorded;'
                    elif prev_table_name == 'recordedfile':
                        # NOTE(review): this flush only runs when a later
                        # table follows 'recordedfile' in the dump —
                        # presumably mythexport always dumps further tables;
                        # confirm.
                        yield '', 'update mythimport_recordedfile set recordedid=@recordedid;'
                        # Fix: this previously selected from
                        # mythimport_recorded (the wrong staging table), so
                        # the staged recordedfile rows were silently
                        # discarded.
                        yield '', 'insert into recordedfile (select * from mythimport_recordedfile);'
                        yield '', 'delete from mythimport_recordedfile;'
                    prev_table_name = table_name
        yield '', sql_part
        logger.debug('\nLine ' + str(line) + ': ' + sql_part)
    yield '', 'set sql_notes=0;'
    yield '', 'drop table if exists mythimport_recorded'
    yield '', 'drop table if exists mythimport_recordedfile'
    yield '', 'set sql_notes=1;'

##############################################################################
# Import the recordings exported by mythexport.
##############################################################################

def _import_recordings(dbc, import_dir='', export_dir='', move=False, cleanup=False):
    """Import every *.sql file found in import_dir into the MythTV database.

    Each .sql file is renamed to .sql.importing while it is being processed,
    then to .sql.imported on success or .sql.failed on failure, so that an
    interrupted run leaves evidence of what was in progress.  When move is
    True, the recording file (and any associated *.png files) named by the
    imported SQL is moved into export_dir.  When cleanup is True, the
    .sql.imported file is deleted after a successful import.

    Returns (importing_count, imported_count).
    """
    global logger
    # Normalise the directory names so plain string concatenation works below.
    if import_dir != '' and not import_dir.endswith('/'):
        import_dir = import_dir + '/'
    if export_dir != '' and not export_dir.endswith('/'):
        export_dir = export_dir + '/'
    # Leftover *.importing files mean a previous run died mid-import; refuse
    # to run until the user has cleaned them up.
    logger.debug('Checking ' + import_dir + ' for *.importing files')
    importing_files = glob.glob(import_dir + GLOB_SQL_EXT + IMPORTING_EXT)
    if len(importing_files) != 0:
        logger.error("Error: *.importing files found:")
        for sql_file in importing_files:
            logger.info(sql_file)
        exit(4)
    if move:
        dst_blksize = os.statvfs(export_dir).f_bsize
        # Look up the 'mythtv' user/group for chown of moved files; -1 tells
        # os.chown() to leave that id unchanged.
        try:
            mythtv_uid = pwd.getpwnam('mythtv').pw_uid
            logger.debug('mythtv_uid=' + str(mythtv_uid))
        except KeyError:
            logger.warning('Warning: User "mythtv" not found, files will not have their user id set. MythTV may not be able to access the files.')
            mythtv_uid = -1
        try:
            mythtv_gid = grp.getgrnam('mythtv').gr_gid
            logger.debug('mythtv_gid=' + str(mythtv_gid))
        except KeyError:
            logger.warning('Warning: Group "mythtv" not found, files will not have their group id set. MythTV may not be able to access the files.')
            mythtv_gid = -1
    importing_count = 0
    imported_count = 0
    # Consistency: use the GLOB_SQL_EXT constant (was a literal '*.sql').
    for sql_file in glob.glob(import_dir + GLOB_SQL_EXT):
        logger.info('Found ' + sql_file + ' to import')
        shutil.move(sql_file, sql_file + IMPORTING_EXT)
        # Fix: use a context manager so the file handle is always closed.
        with open(sql_file + IMPORTING_EXT) as sqlfilehandle:
            sql = sqlfilehandle.read()
        line = 0
        importing_count += 1
        for (action, sql_part) in _modified_sql(_non_sql_line_filter(sql)):
            logger.debug('action = ' + action)
            logger.debug('sql_part = ' + sql_part)
            line += 1
            logger.debug('\nSQL Line ' + str(line) + ': ' + sql_part)
            try:
                sql_result = dbc.execute(sql_part)
            except Exception as e:
                # Fix: log the exception instance (the original logged
                # sys.exc_info()[0], i.e. only the exception class).
                logger.error("Exception executing SQL for %s: %s\naction was: %s\nSQL was: %s\nSQL file not imported" % (sql_file, e, action, sql_part))
                shutil.move(sql_file + IMPORTING_EXT, sql_file + FAILED_EXT)
                break
            logger.debug('sql_result = ' + str(sql_result))
            if action != '':
                logger.debug('Action: ' + action)
                if action == 'basename':
                    basename = dbc.fetchone()[0]
                    logger.debug('basename = ' + basename)
                    if not os.access(import_dir + basename, os.R_OK):
                        logger.error('.. file %s not found, so not importing SQL file %s' % (basename, sql_file))
                        shutil.move(sql_file + IMPORTING_EXT, sql_file + FAILED_EXT)
                        break
                    dbc.execute("select count(*) from recorded where basename='%s';" % basename)
                    if dbc.fetchone()[0] != 0:
                        logger.error('.. file %s already exists in the recorded table, so SQL file %s not imported' % (basename, sql_file))
                        shutil.move(sql_file + IMPORTING_EXT, sql_file + FAILED_EXT)
                        break
                    associated_files = glob.glob(import_dir + basename + "*png")
                    logger.debug('associated_files=' + str(associated_files))
                    if move and not same_partition(import_dir, export_dir):
                        # Check the destination has room for the recording and
                        # its associated files on top of the free space MythTV
                        # must always be left with.
                        dst_free_required = round_up(os.path.getsize(import_dir + basename), dst_blksize)
                        for associated_file in associated_files:
                            dst_free_required += round_up(os.path.getsize(associated_file), dst_blksize)
                        if get_fs_freespace(export_dir) < dst_free_required + MIN_STG_FREE_SPACE:
                            logger.error('.. insufficient free space to move recording file(s) %s to, so SQL file %s not imported' % (basename, sql_file))
                            # Restore the original name (not .failed): a retry
                            # may succeed once space has been freed.
                            shutil.move(sql_file + IMPORTING_EXT, sql_file)
                            break
                elif action == 'old_recordedid':
                    old_recordedid = dbc.fetchone()[0]
                elif action == 'new_recordedid':
                    new_recordedid = dbc.fetchone()[0]
        else:
            # for/else: runs only when the SQL loop completed without a
            # break, i.e. the whole file imported successfully.
            shutil.move(sql_file + IMPORTING_EXT, sql_file + IMPORTED_EXT)
            imported_count += 1
            logger.info('.. ' + sql_file + ' imported (recordedid changed from %d to %d)' % (old_recordedid, new_recordedid))
            if move:
                associated_files.append(import_dir + basename)
                for filename in associated_files:
                    destname = export_dir + os.path.basename(filename)
                    logger.info('.. moving ' + filename + ' to ' + destname)
                    shutil.move(filename, destname)
                    os.chown(destname, mythtv_uid, mythtv_gid)
                    # NOTE(review): this deliberately leaves the files
                    # world-writable — confirm that is intended.
                    os.chmod(destname, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH | stat.S_IWOTH)
                    st = os.stat(destname)
                    if mythtv_uid != -1 and st[stat.ST_UID] != mythtv_uid or mythtv_gid != -1 and st[stat.ST_GID] != mythtv_gid:
                        logger.warning('Warning: chown failed, file %s has uid=%d and gid=%d, should be uid=%d and gid=%d' % (destname, st[stat.ST_UID], st[stat.ST_GID], mythtv_uid, mythtv_gid))
            if cleanup:
                logger.debug('.. deleting SQL file ' + sql_file + IMPORTED_EXT)
                os.remove(sql_file + IMPORTED_EXT)
    return importing_count, imported_count

##############################################################################
# Main
##############################################################################

parser = argparse.ArgumentParser(
    description='Import MythTV recordings exported by Mythexport (Version: ' + VERSION + ')',
    formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=15)
)
parser.add_argument('-1', '--dontrotatelogs', dest='rotate', default=True, action='store_false',
    help='do not rotate log files, just overwrite to the same (single) log file each time ' + PROGRAM_NAME + ' is run (default: false)')
parser.add_argument('-a', '--append_to_log', dest='append', default=False, action='store_true',
    help='append output to the end of the existing log file. Does not work with --mythlog.')
parser.add_argument('-c', '--consolelevel', default=CONSOLE_LEVEL,
    choices=['debug', 'info', 'warning', 'error', 'critical'],
    help='sets the console logging level (default: ' + CONSOLE_LEVEL + ')')
parser.add_argument('-i', '--input_dir', default='',
    help='input directory (default: )')
parser.add_argument('-l', '--loglevel', default=LOGGING_LEVEL,
    choices=['none', 'debug', 'info', 'warning', 'error', 'critical'],
    help='sets the logging level in the log file (default: ' + LOGGING_LEVEL + ')')
parser.add_argument('--logdir', default='.',
    help='logging directory (default: )')
parser.add_argument('-m', '--nomove', dest='move', default=True, action='store_false',
    help='do not move the recording files that have been imported to the output directory (default: false, do move the recording files)')
parser.add_argument('--mythlog', '--MythLog', dest='mythlog', default=False, action='store_true',
    help='use MythLog python binding for logging. Implies -1. '
         'For log rotation when using --mythlog, try using logrotate (eg create a /etc/logrotate.d/mythimport file)')
parser.add_argument('-n', '--nocleanup', dest='cleanup', default=True, action='store_false',
    help='do not delete the .sql files that have been imported (default: false, do delete the .sql files)')
parser.add_argument('-o', '--output_dir', default='',
    help='output directory (should be in a MythTV storage group). '
         "If not specified, defaults to the first directory in the 'Default' storage group that is on the same mountpoint as the input directory.")
parser.add_argument('-r', '--rotate_max', type=int, dest='maxlogfiles', default=MAXLOGFILES,
    help='maximum number of log files to keep when rotating (default: ' + str(MAXLOGFILES) + ')')
parser.add_argument('-s', '--rotate_size', type=int, dest='maxlogfilesize', default=MAXLOGFILESIZE,
    help='maximum size of a log file before it will be rotated (default: ' + str(MAXLOGFILESIZE) + ' bytes)')
parser.add_argument('-v', '-V', '--version', dest='display_version', default=False, action='store_true')

args = parser.parse_args()

if args.display_version:
    print(PROGRAM_NAME + ' version ' + VERSION)
    exit(0)

loglevel = _str_to_loglevel(args.loglevel)
consolelevel = _str_to_loglevel(args.consolelevel)

if args.logdir == '':
    args.logdir = '.'
if not args.logdir.endswith('/'):
    args.logdir = args.logdir + '/'
if args.input_dir == '':
    args.input_dir = '.'
args.input_dir = os.path.realpath(args.input_dir)
if args.mythlog:
    # MythLog manages its own log file, so rotation is not applicable.
    args.rotate = False

# Initialise logging.
logger = logging.getLogger(PROGRAM_NAME)
logger.setLevel(min(loglevel, consolelevel))
if loglevel != logging.NOTSET:
    # Log file for all output messages.
    logfile = args.logdir + PROGRAM_NAME + '.log'
    try:
        if args.mythlog:
            fh = MythLogHandler(logfile)
        elif args.rotate:
            fh = logging.handlers.RotatingFileHandler(
                logfile,
                maxBytes = args.maxlogfilesize,
                backupCount = args.maxlogfiles
            )
        else:
            if args.append:
                fh = logging.FileHandler(
                    logfile,
                    mode='a'
                )
            else:
                fh = logging.FileHandler(
                    logfile,
                    mode='w'
                )
    except IOError as e:
        if e.errno == 13:
            print('Exception opening log file ' + logfile + ' (errno 13: permission denied)'
                '\nYou may need to create the log file manually and give it the correct ownership and permissions.'
                '\nFor example:'
                '\n sudo touch ' + logfile +
                '\n sudo chown mythtv:mythtv ' + logfile +
                '\n sudo chmod a=rw ' + logfile + '\n'
                '\nOr you could try using the --mythlog option to use the MythLog Python bindings for logging.'
            )
        else:
            raise
        exit(1)
    if args.rotate:
        # Start a fresh log file for this run.
        try:
            fh.doRollover()
        except OSError as e:
            if e.errno == 13:
                print('Exception rotating log file ' + logfile + ' (errno 13: permission denied)'
                    '\nYou probably do not have permission to create a new log file when rotating.'
                    '\nTry using the "-1" option to use a single, non-rotating log file.'
                )
            else:
                print('Raising...')
                raise
            exit(1)
    if args.mythlog:
        fh_formatter = logging.Formatter('%(levelname)s %(message)s')
    else:
        fh_formatter = logging.Formatter(
            '%(asctime)s %(levelname)s %(message)s',
            datefmt='%Y-%m-%d %H:%M:%S'
        )
    fh.setFormatter(fh_formatter)
    logger.addHandler(fh)
    fh.setLevel(loglevel)

ch = logging.StreamHandler(stream=sys.stdout)
ch_formatter = logging.Formatter(
    '%(message)s'
)
ch.setFormatter(ch_formatter)
logger.addHandler(ch)
ch.setLevel(consolelevel)

logger.debug('args=' + str(args))
logger.debug('loglevel=' + str(loglevel))
logger.debug('consolelevel=' + str(consolelevel))
logger.debug('MYTHCONFDIR=' + os.environ.get('MYTHCONFDIR', ''))
logger.debug('USER=' + os.environ.get('USER', ''))
logger.debug('HOME=' + os.environ.get('HOME', ''))
logger.debug('UID=' + str(os.getuid()))
logger.debug('GID=' + str(os.getgid()))

# This tool relies on the 'recordedid' field introduced in MythTV 0.28.
if (MythTV.__version__[0], MythTV.__version__[1]) < (0, 28):
    logger.error('Error: MythTV version is ' + str(MythTV.__version__[0]) + '.' +
        str(MythTV.__version__[1]) + ', must be 0.28 or greater'
    )
    exit(1)

local_config = _ConfigXml(LOCAL_CONFDIR)
db = MythTV.MythDB(
    DBHostName = local_config.hostname,
    DBUsername = local_config.username,
    DBPassword = local_config.password,
    DBName = local_config.database,
    DBPort = local_config.port
)
dbc = db.cursor()

# Change the SQL mode settings for the session to allow zeroes in
# timestamps, as required by MythTV.
# (Removed a block of commented-out legacy code here that read back
# @@SESSION.sql_mode and stripped the NO_ZERO_DATE/NO_ZERO_IN_DATE flags;
# the session mode is now simply set to a fixed list below.)
rows = dbc.execute('SET sql_notes=0;')
logger.debug('SET sql_notes=0; rows=' + str(rows))
sql_mode = 'STRICT_ALL_TABLES,ONLY_FULL_GROUP_BY,NO_ENGINE_SUBSTITUTION'
logger.debug("sql_mode setting = " + str(sql_mode))
dbc.execute("SET SESSION sql_mode = '" + sql_mode + "';")
rows = dbc.execute("SELECT @@SESSION.sql_mode;")
if rows != 1:
    # Fix: was 'raise MythDBError', an undefined bare name (NameError); the
    # exception class lives in the MythTV package.
    raise MythTV.MythDBError
sql_mode = dbc.fetchone()[0]
logger.debug("new sql_mode = " + str(sql_mode))
dbc.execute('SET sql_notes=1;')

if args.output_dir == '':
    # Find a directory in the Default storage group that is on the same
    # mountpoint as args.input_dir.
    input_mp = _find_mount_point(args.input_dir)
    logger.debug('Mountpoint for input_dir ' + args.input_dir + ' is ' + input_mp)
    hostname = _get_hostname()
    logger.debug('hostname=' + hostname)
    print('hostname=' + hostname)
    # NOTE(review): hostname is interpolated straight into the SQL; it comes
    # from the local resolver rather than untrusted input, but a
    # parameterised query would still be safer.
    rows = dbc.execute("select dirname from storagegroup where hostname='" + hostname + "' and groupname='Default'")
    if rows == 0:
        logger.error('Error: Default storage group does not exist in your database!')
        exit(1)
    while rows != 0:
        dirname = dbc.fetchone()[0]
        dirname_mp = _find_mount_point(dirname)
        logger.debug('Mountpoint for Default storage group directory ' + dirname + ' is ' + dirname_mp)
        if input_mp == dirname_mp:
            break
        rows -= 1
    if rows == 0:
        logger.error('Error: There is no Default storage group directory on the same mountpoint as "' + args.input_dir + '" - please use the -o option to specify the output directory you want.')
        exit(1)
    args.output_dir = dirname

if args.move:
    # Sanity-check the output directory before anything is moved.
    if not os.path.isdir(args.output_dir):
        logger.error('Error: output directory ' + args.output_dir + ' is not a directory!')
        exit(1)
    elif not os.access(args.output_dir, os.W_OK):
        logger.error('Error: output directory ' + args.output_dir + ' is not writeable!')
        exit(1)
    elif os.path.realpath(args.input_dir) == os.path.realpath(args.output_dir):
        logger.error('Error: output directory ' + args.output_dir + ' can not be the same as the input directory!')
        exit(1)
    logger.info('Will move recording files to directory ' + args.output_dir)

importing_count, imported_count = _import_recordings(dbc, args.input_dir, args.output_dir, args.move, args.cleanup)
logger.info('Mythimport finished:\n Total recordings: %d\n Successful imports: %d\n Failed imports: %d' % (importing_count, imported_count, importing_count - imported_count))
exit(0)