Various Redis improvements

This commit is contained in:
checktheroads 2019-06-15 12:42:28 -07:00
parent 2215f5a4f9
commit eece919376
3 changed files with 59 additions and 33 deletions

View file

@@ -212,7 +212,12 @@ def params():
general["primary_asn"] = config["general"].get("primary_asn", "65000")
general["org_name"] = config["general"].get("org_name", "The Company")
general["google_analytics"] = config["general"].get("google_analytics", "")
general["redis_host"] = config["general"].get("redis_host", "localhost")
general["redis_port"] = config["general"].get("redis_port", 6379)
features["rate_limit"] = config["features"]["rate_limit"]
features["rate_limit"]["redis_id"] = config["features"]["rate_limit"].get(
"redis_id", 1
)
features["rate_limit"]["query"] = config["features"]["rate_limit"]["query"]
features["rate_limit"]["query"]["rate"] = config["features"]["rate_limit"][
"query"
@@ -261,10 +266,8 @@ def params():
"site"
].get("button", "Try Again")
features["cache"] = config["features"]["cache"]
features["cache"]["redis_id"] = config["features"]["cache"].get("redis_id", 0)
features["cache"]["timeout"] = config["features"]["cache"].get("timeout", 120)
features["cache"]["directory"] = config["features"]["cache"].get(
"directory", os.path.join(hyperglass_root, "/tmp/hyperglass_cache")
)
features["cache"]["show_text"] = config["features"]["cache"].get("show_text", True)
features["cache"]["text"] = config["features"]["cache"].get(
"text",

View file

@@ -17,22 +17,44 @@ timeout = 60
keepalive = 10
# Prometheus Multiprocessing directory, set as environment variable
prometheus_multiproc_dir = "/tmp/prometheus_multiproc_dir"
prometheus_multiproc_dir = "/tmp/hyperglass_prometheus"
def on_starting(server):  # pylint: disable=unused-argument
    """Pre-startup Gunicorn tasks.

    Renders Jinja2 -> Sass and compiles Sass -> CSS before workers load,
    verifies that Redis is reachable with the configured parameters, and
    (re)creates an empty Prometheus multiprocessing directory.
    """
    # Renders Jinja2 -> Sass, compiles Sass -> CSS prior to worker load
    try:
        import hyperglass.render

        hyperglass.render.css()
    except ImportError as error_exception:
        logger.error(f"Exception occurred:\n{error_exception}")
    # Verify Redis is running before accepting any work; the cache and the
    # rate limiter both depend on it.
    try:
        import hyperglass.configuration
        import redis

        config = hyperglass.configuration.params()
        redis_config = {
            "host": config["general"]["redis_host"],
            "port": config["general"]["redis_port"],
            "charset": "utf-8",
            "decode_responses": True,
            "db": config["features"]["cache"]["redis_id"],
        }
        r_cache = redis.Redis(**redis_config)
        # A short-lived test key proves both connectivity and write access.
        if r_cache.set("testkey", "testvalue", ex=1):
            logger.debug("Redis is working properly")
    except redis.exceptions.ConnectionError:
        logger.error("Redis is not running")
        raise EnvironmentError("Redis is not running")
    # Prometheus multiprocessing directory: always end up with a fresh,
    # empty directory. Remove a stale one if present, then recreate it --
    # removing without recreating would leave prometheus_client with no
    # directory to write its per-worker metric files into.
    if os.path.exists(prometheus_multiproc_dir):
        shutil.rmtree(prometheus_multiproc_dir)
    os.mkdir(prometheus_multiproc_dir)
    os.environ["prometheus_multiproc_dir"] = prometheus_multiproc_dir
@@ -44,4 +66,5 @@ def worker_exit(server, worker):  # pylint: disable=unused-argument
def on_exit(server):  # pylint: disable=unused-argument
    """Gunicorn shutdown task: remove the Prometheus multiprocessing directory.

    Guarded with an existence check so shutdown does not raise
    FileNotFoundError when the directory was never created (e.g. a failed
    startup) or was already cleaned up.
    """
    if os.path.exists(prometheus_multiproc_dir):
        shutil.rmtree(prometheus_multiproc_dir)

View file

@@ -21,20 +21,6 @@ from hyperglass.command import execute
from hyperglass import configuration
from hyperglass import render
# Make sure redis is started
try:
r_cache = redis.Redis(
host="localhost", port="6379", charset="utf-8", decode_responses=True, db=0
)
if r_cache.set("testkey", "testvalue", ex=1):
logger.debug("Redis is working properly")
except (redis.exceptions.ConnectionError):
logger.error("Redis is not running")
raise EnvironmentError("Redis is not running")
# Main Flask definition
app = Flask(__name__, static_url_path="/static")
# Logzero Configuration
if configuration.debug_state():
logzero.loglevel(logging.DEBUG)
@@ -42,12 +28,25 @@ else:
logzero.loglevel(logging.INFO)
# Initialize general configuration parameters for reuse
# brand = configuration.branding()
config = configuration.params()
codes = configuration.codes()
codes_reason = configuration.codes_reason()
logger.debug(f"Configuration Parameters:\n {pprint(config)}")
# Redis Config
redis_config = {
"host": config["general"]["redis_host"],
"port": config["general"]["redis_port"],
"charset": "utf-8",
"decode_responses": True,
}
# Main Flask definition
app = Flask(__name__, static_url_path="/static")
# Redis Cache Config
r_cache = redis.Redis(**redis_config, db=config["features"]["rate_limit"]["redis_id"])
# Flask-Limiter Config
query_rate = config["features"]["rate_limit"]["query"]["rate"]
query_period = config["features"]["rate_limit"]["query"]["period"]
@@ -59,12 +58,12 @@ logger.debug(f"Query rate limit: {rate_limit_query}")
logger.debug(f"Site rate limit: {rate_limit_site}")
# Redis Config for Flask-Limiter storage
r_limiter = redis.Redis(
host="localhost", port="6379", charset="utf-8", decode_responses=True, db=1
)
r_limiter_db = config["features"]["rate_limit"]["redis_id"]
r_limiter_url = f'redis://{redis_config["host"]}:{redis_config["port"]}/{r_limiter_db}'
r_limiter = redis.Redis(**redis_config, db=config["features"]["rate_limit"]["redis_id"])
# Adds Flask config variable for Flask-Limiter
app.config.update(RATELIMIT_STORAGE_URL="redis://localhost:6379/1")
app.config.update(RATELIMIT_STORAGE_URL=r_limiter_url)
# Initializes Flask-Limiter
limiter = Limiter(app, key_func=get_ipaddr, default_limits=[rate_limit_site])
# Prometheus Config
@@ -204,10 +203,11 @@ def hyperglass_main():
try:
logger.debug(f"Sending query {cache_key} to execute module...")
cache_value = execute.Execute(lg_data).response()
logger.debug("Validated response...")
value_output = cache_value["output"]
value_code = cache_value["status"]
logger.debug(f"Status Code: {value_code}, Output: {value_output}")
logger.debug(
f"Validated response...\nStatus Code: {value_code}, Output: {value_output}"
)
# If it doesn't, create a cache entry
r_cache.hmset(cache_key, cache_value)
r_cache.expire(cache_key, cache_timeout)
@@ -218,11 +218,11 @@ def hyperglass_main():
logger.debug(f"Returning {value_code} response")
return Response(response["output"], response["status"])
# If 400 error, return error message and code
# Note: 200 & 400 errors are separated mainly for potential future use
# ["code", "reason", "source", "type", "loc_id", "target"],
if value_code in [405, 415]:
count_errors.labels(
response[1],
codes_reason[response[1]],
response["status"],
codes_reason[response["status"]],
client_addr,
lg_data["type"],
lg_data["location"],