Changed the way of creating jobs (still with an issue).

Daniel 2019-08-18 14:28:21 +02:00
parent ef33ca6897
commit dabf9a55d0
6 changed files with 44 additions and 41 deletions

.gitignore (vendored)

@@ -104,4 +104,5 @@ venv.bak/
 .mypy_cache/
 dirkules.db
 .idea
 telegram_config.py
+dirkules_tasks.db

dirkules/__init__.py

@@ -11,22 +11,13 @@ db = SQLAlchemy(app)
 import dirkules.models
 # create db if not exists
 db.create_all()
 # start communication
-communicator = com.TelegramCom(app)
 # start scheduler
 scheduler = APScheduler()
 scheduler.init_app(app)
 scheduler.start()
+communicator = com.TelegramCom(app)
 # @app.before_first_request
 from dirkules import tasks
-# from dirkules.models import Time
-# from sqlalchemy.orm.exc import NoResultFound
-#
-# try:
-#     Time.query.one()
-# except NoResultFound:
-#     db.session.add(Time("Drives"))
-#     db.session.commit()
-# import views
 import dirkules.views
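
Note on ordering: Flask-APScheduler reads the JOBS list (added to the config below) from app.config when init_app() runs, so the config must already be loaded by then. A minimal sketch of the startup sequence this hunk establishes; the from_object() call is an assumption, since the config loading sits above the hunk and is not shown:

from flask import Flask
from flask_apscheduler import APScheduler

app = Flask(__name__)
app.config.from_object('dirkules.config')  # assumed; config loading is not shown in the diff

scheduler = APScheduler()
scheduler.init_app(app)  # picks up SCHEDULER_* settings and the JOBS list from app.config
scheduler.start()        # the interval job starts firing from here on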

dirkules/config.py

@@ -1,7 +1,9 @@
 import os
 from dirkules.telegram_config import *
-# from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
-from apscheduler.jobstores.memory import MemoryJobStore
+import datetime
+from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
+# from apscheduler.jobstores.memory import MemoryJobStore
 baseDir = os.path.abspath(os.path.dirname(__file__))
 staticDir = os.path.join(baseDir, 'static')

@@ -13,8 +15,8 @@ SQLALCHEMY_TRACK_MODIFICATIONS = False
 # The SCHEDULER_EXECUTORS is a global configuration, in this case, only 1 thread will be used for all the jobs.
 # I believe the best way for you is to use max_workers: 1 when running locally
-# SCHEDULER_JOBSTORES = {'default': SQLAlchemyJobStore(url='sqlite:///' + os.path.join(baseDir, 'dirkules.db'))}
-SCHEDULER_JOBSTORES = {'default': MemoryJobStore()}
+SCHEDULER_JOBSTORES = {'default': SQLAlchemyJobStore(url='sqlite:///' + os.path.join(baseDir, 'dirkules_tasks.db'))}
+# SCHEDULER_JOBSTORES = {'default': MemoryJobStore()}
 SCHEDULER_EXECUTORS = {'default': {'type': 'threadpool', 'max_workers': 3}}

@@ -22,4 +24,16 @@ SCHEDULER_JOB_DEFAULTS = {'coalesce': False, 'max_instances': 1}
 SCHEDULER_API_ENABLED = True
 # should not be here in final version
 SECRET_KEY = b'gf3iz3V!R3@Ny!ri'
+JOBS = [
+    {
+        'id': 'refresh_disks',
+        'func': 'dirkules.tasks:refresh_disks',
+        'trigger': 'interval',
+        'next_run_time': datetime.datetime.now(),
+        'replace_existing': True,
+        'seconds': 3600
+    }
+]
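
This JOBS entry is plausibly where the commit message's "still with an issue" lives, though the diff itself does not say: with a persistent SQLAlchemyJobStore the job list is re-applied on every start, 'next_run_time': datetime.datetime.now() is evaluated when the config module is imported, and 'replace_existing': True overwrites whatever schedule was persisted, so the job fires immediately after every restart. A hedged sketch of one alternative, creating the job only when the store does not already hold it (illustrative only; ensure_refresh_job is a made-up helper, not the author's code):

import datetime

def ensure_refresh_job(scheduler):
    # Keep the schedule persisted in dirkules_tasks.db across restarts;
    # only create the job when the store does not know it yet.
    if scheduler.get_job('refresh_disks') is None:
        scheduler.add_job(
            id='refresh_disks',
            func='dirkules.tasks:refresh_disks',
            trigger='interval',
            seconds=3600,
            next_run_time=datetime.datetime.now(),  # first run right away
        )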

dirkules/hardware/drives.py

@@ -2,9 +2,9 @@
 import subprocess


-def getAllDrives():
+def get_all_drives():
     drives = []
-    driveDict = []
+    drive_dict = []
     keys = [
         'name', 'model', 'serial', 'size', 'rota', 'rm', 'hotplug', 'state',
         'smart'

@@ -24,27 +24,27 @@ def getAllDrives():
     lsblk.stdout.close()
     del drives[0]
     for line in drives:
-        newLine = ' '.join(line.split())
-        newLine = newLine.split(" ")
-        while len(newLine) > 7:
-            newLine[1] = newLine[1] + " " + newLine[2]
-            del newLine[2]
+        new_line = ' '.join(line.split())
+        new_line = new_line.split(" ")
+        while len(new_line) > 7:
+            new_line[1] = new_line[1] + " " + new_line[2]
+            del new_line[2]
         values = []
         for i in range(len(keys) - 2):
-            if newLine[i] == "0":
+            if new_line[i] == "0":
                 values.append(False)
-            elif newLine[i] == "1":
+            elif new_line[i] == "1":
                 values.append(True)
             else:
-                values.append(newLine[i])
+                values.append(new_line[i])
         values.append("running")
-        values.append(smartPassed("/dev/" + values[0]))
-        driveDict.append(dict(zip(keys, values)))
-    sorted_drive_dict = sorted(driveDict, key=lambda drive: drive['name'])
+        values.append(get_smart("/dev/" + values[0]))
+        drive_dict.append(dict(zip(keys, values)))
+    sorted_drive_dict = sorted(drive_dict, key=lambda drive: drive['name'])
     return sorted_drive_dict


-def smartPassed(device):
+def get_smart(device):
     passed = False
     smartctl = subprocess.Popen(["smartctl -H " + device],
                                 stdout=subprocess.PIPE,

@@ -65,7 +65,7 @@ def smartPassed(device):
 def part_for_disk(device):
     # lsblk /dev/sdd -b -o NAME,LABEL,FSTYPE,SIZE,UUID,MOUNTPOINT
     parts = []
-    partdict = list()
+    part_dict = list()
     keys = ['name', 'label', 'fs', 'size', 'uuid', 'mount']
     device = "/dev/" + device
     lsblk = subprocess.Popen(

@@ -103,6 +103,6 @@ def part_for_disk(device):
         values = list()
         for start, end in zip(element_length, element_length[1:]):
             values.append(part[start:(end - 1)].strip())
-        partdict.append(dict(zip(keys, values)))
-    return partdict
+        part_dict.append(dict(zip(keys, values)))
+    return part_dict
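
A note on the while len(new_line) > 7 loop above, since it looks odd at first glance: the lsblk row is split on single spaces, so a model name containing spaces spills across several tokens, and the loop folds the overflow back into the model field until only the seven expected columns remain. A small illustration with a made-up sample row (the actual lsblk invocation sits outside this hunk):

row = "sda Samsung SSD 860 EVO S3Z1NB0K 500107862016 1 0 0"
tokens = ' '.join(row.split()).split(" ")
while len(tokens) > 7:  # fold overflow back into the model field
    tokens[1] = tokens[1] + " " + tokens[2]
    del tokens[2]
print(tokens)
# ['sda', 'Samsung SSD 860 EVO', 'S3Z1NB0K', '500107862016', '1', '0', '0']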

dirkules/manager/driveManager.py

@@ -28,7 +28,7 @@ def get_partitions(drive_name):
 def get_drives():
     current_time = datetime.datetime.now()
-    drive_dict = hardware_drives.getAllDrives()
+    drive_dict = hardware_drives.get_all_drives()
     for drive in drive_dict:
         drive_obj = Drive(
             drive.get("name"), drive.get("model"), drive.get("serial"),

dirkules/tasks.py

@@ -1,9 +1,6 @@
-from dirkules import scheduler
-import datetime
 import dirkules.manager.driveManager as drive_man


-@scheduler.task('interval', id='refresh_disks', seconds=3600, next_run_time=datetime.datetime.now())
 def refresh_disks():
     drive_man.get_drives()
-    print("Drives refreshed")
+    print("Drives haha refreshed")