Removed Flask-Caching and migrated to native Redis caching; migrated Flask-Limiter to Redis storage

This commit is contained in:
checktheroads 2019-06-15 11:35:55 -07:00
parent abdc73bdd3
commit 2215f5a4f9
5 changed files with 54 additions and 41 deletions

@@ -240,7 +240,8 @@ class Execute:
         )
         if not validity:
             logger.debug(f"Invalid query")
-            return msg, status, self.input_data
+            ## return msg, status, self.input_data
+            return {"output": msg, "status": status}
         connection = None
         output = config["messages"]["general"]
         info = self.input_data
@@ -249,7 +250,8 @@ class Execute:
             connection = Rest("rest", device_config, self.input_type, self.input_target)
             raw_output, status = connection.frr()
             output = self.parse(raw_output, device_config["type"])
-            return output, status, info
+            ## return output, status, info
+            return {"output": output, "status": status}
         if device_config["type"] in configuration.scrape_list():
             logger.debug(f"Initializing Netmiko...")
             connection = Netmiko(
@@ -263,9 +265,11 @@ class Execute:
             logger.debug(
                 f'Parsed output for device type {device_config["type"]}:\n{output}'
             )
-            return output, status, info
+            ## return output, status, info
+            return {"output": output, "status": status}
         if device_config["type"] not in configuration.supported_nos():
             logger.error(
                 f"Device not supported, or no commands for device configured. {status}, {info}"
             )
-            return output, status, info
+            ## return output, status, info
+            return {"output": output, "status": status}

@@ -241,12 +241,12 @@ def params():
            Please wait one minute and try again.""",
    )
    features["rate_limit"]["site"] = config["features"]["rate_limit"]["site"]
-    features["rate_limit"]["site"]["rate"] = config["features"]["rate_limit"].get(
-        "rate", 60
-    )
-    features["rate_limit"]["site"]["period"] = config["features"]["rate_limit"].get(
-        "period", "minute"
-    )
+    features["rate_limit"]["site"]["rate"] = config["features"]["rate_limit"][
+        "site"
+    ].get("rate", 60)
+    features["rate_limit"]["site"]["period"] = config["features"]["rate_limit"][
+        "site"
+    ].get("period", "minute")
     features["rate_limit"]["site"]["title"] = config["features"]["rate_limit"][
         "site"
     ].get("title", "Limit Reached")
@@ -257,6 +257,9 @@ def params():
         f'You have accessed this site more than {features["rate_limit"]["site"]["rate"]} '
         f'times in the last {features["rate_limit"]["site"]["period"]}.',
     )
+    features["rate_limit"]["site"]["button"] = config["features"]["rate_limit"][
+        "site"
+    ].get("button", "Try Again")
     features["cache"] = config["features"]["cache"]
     features["cache"]["timeout"] = config["features"]["cache"].get("timeout", 120)
     features["cache"]["directory"] = config["features"]["cache"].get(

@@ -3,6 +3,7 @@ https://github.com/checktheroads/hyperglass
 Guncorn configuration
 """
 import os
+import shutil
 import multiprocessing

 from logzero import logger
@@ -30,6 +31,7 @@ def on_starting(server):  # pylint: disable=unused-argument
     except ImportError as error_exception:
         logger.error(f"Exception occurred:\n{error_exception}")

     # Prometheus multiprocessing directory
+    shutil.rmtree(prometheus_multiproc_dir)
     os.mkdir(prometheus_multiproc_dir)
     os.environ["prometheus_multiproc_dir"] = prometheus_multiproc_dir
@@ -42,9 +44,4 @@ def worker_exit(server, worker):  # pylint: disable=unused-argument

 def on_exit(server):
-    try:
-        import shutil
-    except ImportError as error_exception:
-        logger.error(f"Exception occurred:\n{error_exception}")
     shutil.rmtree(prometheus_multiproc_dir)
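
With shutil now a module-level import, the startup hook wipes and recreates the Prometheus multiprocess directory. A caveat worth noting when reading this change: shutil.rmtree() raises FileNotFoundError when the directory is absent (e.g. the very first boot), and os.mkdir() raises FileExistsError when it is not. A more defensive variant, as a sketch only (the path here is a placeholder, not the commit's value):

    import os
    import shutil

    prometheus_multiproc_dir = "/tmp/hyperglass_prometheus"  # placeholder path

    def reset_multiproc_dir(path: str) -> None:
        # ignore_errors makes the wipe a no-op when the directory is missing
        shutil.rmtree(path, ignore_errors=True)
        # exist_ok makes recreation a no-op when a previous run left it behind
        os.makedirs(path, exist_ok=True)
        os.environ["prometheus_multiproc_dir"] = path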

@@ -8,10 +8,10 @@ import logging
 from pprint import pprint

 # Module Imports
+import redis
 import logzero
 from logzero import logger
 from flask import Flask, request, Response
-from flask_caching import Cache
 from flask_limiter import Limiter
 from flask_limiter.util import get_ipaddr
 from prometheus_client import generate_latest, Counter, CollectorRegistry, multiprocess
@@ -21,6 +21,17 @@ from hyperglass.command import execute
 from hyperglass import configuration
 from hyperglass import render

+# Make sure redis is started
+try:
+    r_cache = redis.Redis(
+        host="localhost", port="6379", charset="utf-8", decode_responses=True, db=0
+    )
+    if r_cache.set("testkey", "testvalue", ex=1):
+        logger.debug("Redis is working properly")
+except (redis.exceptions.ConnectionError):
+    logger.error("Redis is not running")
+    raise EnvironmentError("Redis is not running")
+
 # Main Flask definition
 app = Flask(__name__, static_url_path="/static")
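
The startup probe above verifies Redis by writing a one-second throwaway key. An equivalent check, sketched here with redis-py's built-in ping() and the same connection parameters (note that redis-py also accepts port as an int):

    import redis

    r_cache = redis.Redis(host="localhost", port=6379, decode_responses=True, db=0)
    try:
        r_cache.ping()  # raises redis.exceptions.ConnectionError if Redis is down
    except redis.exceptions.ConnectionError as exc:
        raise EnvironmentError("Redis is not running") from exc
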
@@ -44,22 +55,17 @@ site_rate = config["features"]["rate_limit"]["site"]["rate"]
 site_period = config["features"]["rate_limit"]["site"]["period"]
 rate_limit_query = f"{query_rate} per {query_period}"
 rate_limit_site = f"{site_rate} per {site_period}"
-limiter = Limiter(app, key_func=get_ipaddr, default_limits=[rate_limit_site])
 logger.debug(f"Query rate limit: {rate_limit_query}")
 logger.debug(f"Site rate limit: {rate_limit_site}")

-# Flask-Caching Config
-cache_directory = config["features"]["cache"]["directory"]
-cache_timeout = config["features"]["cache"]["timeout"]
-cache = Cache(
-    app,
-    config={
-        "CACHE_TYPE": "filesystem",
-        "CACHE_DIR": cache_directory,
-        "CACHE_DEFAULT_TIMEOUT": cache_timeout,
-    },
-)
-logger.debug(f"Cache directory: {cache_directory}, Cache timeout: {cache_timeout}")
+# Redis Config for Flask-Limiter storage
+r_limiter = redis.Redis(
+    host="localhost", port="6379", charset="utf-8", decode_responses=True, db=1
+)
+# Adds Flask config variable for Flask-Limiter
+app.config.update(RATELIMIT_STORAGE_URL="redis://localhost:6379/1")
+limiter = Limiter(app, key_func=get_ipaddr, default_limits=[rate_limit_site])

 # Prometheus Config
 count_data = Counter(
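
Order matters in this hunk: Limiter reads RATELIMIT_STORAGE_URL from the Flask config when it is constructed, which is why its instantiation moves below app.config.update(...). The r_limiter client itself is never handed to Flask-Limiter; the limiter connects on its own via the URL. A minimal standalone sketch of the same wiring, with an assumed rate string:

    from flask import Flask
    from flask_limiter import Limiter
    from flask_limiter.util import get_ipaddr

    app = Flask(__name__)
    # Must be set before Limiter() so the redis storage backend is picked up
    app.config.update(RATELIMIT_STORAGE_URL="redis://localhost:6379/1")
    limiter = Limiter(app, key_func=get_ipaddr, default_limits=["60 per minute"])
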
@@ -190,25 +196,27 @@ def hyperglass_main():
     # Stringify the form response containing serialized JSON for the request, use as key for k/v
     # cache store so each command output value is unique
     cache_key = str(lg_data)
+    # Define cache entry expiry time
+    cache_timeout = config["features"]["cache"]["timeout"]
+    logger.debug(f"Cache Timeout: {cache_timeout}")
     # Check if cached entry exists
-    if cache.get(cache_key) is None:
+    if not r_cache.hgetall(cache_key):
         try:
             logger.debug(f"Sending query {cache_key} to execute module...")
             cache_value = execute.Execute(lg_data).response()
-            logger.debug(f"Validated response...")
-            value_code = cache_value[1]
-            value_entry = cache_value[0:2]
-            logger.debug(
-                f"Status Code: {value_code}, Output: {cache_value[1]}, Info: {cache_value[2]}"
-            )
+            logger.debug("Validated response...")
+            value_output = cache_value["output"]
+            value_code = cache_value["status"]
+            logger.debug(f"Status Code: {value_code}, Output: {value_output}")
             # If it doesn't, create a cache entry
-            cache.set(cache_key, value_entry)
+            r_cache.hmset(cache_key, cache_value)
+            r_cache.expire(cache_key, cache_timeout)
             logger.debug(f"Added cache entry for query: {cache_key}")
             # If 200, return output
-            response = cache.get(cache_key)
+            response = r_cache.hgetall(cache_key)
             if value_code == 200:
                 logger.debug(f"Returning {value_code} response")
-                return Response(response[0], response[1])
+                return Response(response["output"], response["status"])
             # If 400 error, return error message and code
             # Note: 200 & 400 errors are separated mainly for potential future use
             if value_code in [405, 415]:
@@ -221,12 +229,12 @@ def hyperglass_main():
                 lg_data["target"],
             ).inc()
             logger.debug(f"Returning {value_code} response")
-            return Response(response[0], response[1])
+            return Response(response["output"], response["status"])
         except:
             logger.error(f"Unable to add output to cache: {cache_key}")
             raise
     # If it does, return the cached entry
     else:
         logger.debug(f"Cache match for: {cache_key}, returning cached entry")
-        response = cache.get(cache_key)
-        return Response(response[0], response[1])
+        response = r_cache.hgetall(cache_key)
+        return Response(response["output"], response["status"])
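
The new cache flow is: hgetall returns an empty dict on a miss, the computed {"output", "status"} dict is written as a hash with hmset, and expire applies the configured TTL. Two things worth knowing when reading this code: with decode_responses=True, hgetall returns every field as a string (so response["status"] comes back as e.g. "200", which Flask's Response still accepts), and later redis-py releases deprecate hmset() in favor of hset(name, mapping=...); this commit predates that. The pattern as a self-contained sketch, where compute is a hypothetical callable standing in for Execute(...).response():

    import redis

    r_cache = redis.Redis(host="localhost", port=6379, decode_responses=True, db=0)

    def cached_query(cache_key: str, compute, timeout: int = 120):
        cached = r_cache.hgetall(cache_key)     # {} when the key is absent
        if not cached:
            value = compute()                   # {"output": ..., "status": ...}
            r_cache.hmset(cache_key, value)     # store the dict as a redis hash
            r_cache.expire(cache_key, timeout)  # apply the cache TTL
            cached = r_cache.hgetall(cache_key)
        return cached["output"], int(cached["status"])  # values come back as str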

@@ -11,3 +11,4 @@ logzero
 click
 passlib
 prometheus_client
+redis