Compare commits

...

13 commits
main ... 1.16.0

Author SHA1 Message Date
rjshrjndrn
3ae4983154 chore(helm): Updating chalice image release 2023-12-15 12:51:31 +00:00
Kraiem Taha Yassine
ece2631c60
fix(chalice): fixed wrong schema transformer (#1780) 2023-12-15 13:37:13 +01:00
rjshrjndrn
48954352fe chore(helm): Updating chalice image release 2023-12-14 17:02:35 +00:00
rjshrjndrn
d3c18f9af6 chore(helm): Updating alerts image release 2023-12-14 16:56:10 +00:00
rjshrjndrn
bd391ca935 chore(helm): Updating alerts image release 2023-12-14 16:56:10 +00:00
rjshrjndrn
362133f110 chore(helm): Updating chalice image release 2023-12-14 16:56:10 +00:00
Kraiem Taha Yassine
dcf6d24abd
fix(chalice): fix experimental sessions search with negative events and performance filters at the same time (#1777) 2023-12-14 17:55:14 +01:00
Kraiem Taha Yassine
b2ac6ba0f8
Crons v1.16.0 (#1776)
* refactor(chalice): moved db_request_handler to utils package

* refactor(chalice): moved db_request_handler to utils package
fix(chalice): supported usability tests in EE

* refactor(crons): changed assist_events_aggregates_cron to have only 1 execution every hour
refactor(crons): optimized assist_events_aggregates_cron to use only 1 DB cursor for successive queries
2023-12-13 18:06:33 +01:00
rjshrjndrn
34729e87ff chore(helm): Updating chalice image release 2023-12-13 14:44:08 +00:00
Kraiem Taha Yassine
74950dbe72
patch Api v1.16.0 (#1774)
* refactor(chalice): moved db_request_handler to utils package

* refactor(chalice): moved db_request_handler to utils package
fix(chalice): supported usability tests in EE
2023-12-13 14:49:51 +01:00
rjshrjndrn
82943ab19b chore(helm): Updating frontend image release 2023-12-13 12:27:36 +00:00
nick-delirium
be1ae8e89e fix(ui): change env.sample 2023-12-13 13:22:46 +01:00
rjshrjndrn
d17a32af30 upgrade: fix scripts 2023-12-13 09:26:01 +01:00
14 changed files with 146 additions and 99 deletions

View file

@ -2,7 +2,7 @@ import logging
from fastapi import HTTPException, status
from chalicelib.core.db_request_handler import DatabaseRequestHandler
from chalicelib.utils.db_request_handler import DatabaseRequestHandler
from chalicelib.core.usability_testing.schema import UTTestCreate, UTTestSearch, UTTestUpdate
from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.utils.helper import dict_to_camel_case, list_to_camel_case

View file

@ -10,7 +10,7 @@ def transform_email(email: str) -> str:
def int_to_string(value: int) -> str:
return str(value) if isinstance(value, int) else int
return str(value) if isinstance(value, int) else value
def remove_whitespace(value: str) -> str:

1
ee/api/.gitignore vendored
View file

@ -274,3 +274,4 @@ Pipfile.lock
/orpy.py
/chalicelib/core/usability_testing/
/NOTES.md
/chalicelib/utils/db_request_handler.py

View file

@ -52,7 +52,7 @@ async def lifespan(app: FastAPI):
await events_queue.init()
app.schedule.start()
for job in core_crons.cron_jobs + core_dynamic_crons.cron_jobs + traces.cron_jobs + ee_crons.ee_cron_jobs:
for job in core_crons.cron_jobs + core_dynamic_crons.cron_jobs + traces.cron_jobs + ee_crons.cron_jobs:
app.schedule.add_job(id=job["func"].__name__, **job)
ap_logger.info(">Scheduled jobs:")

View file

@ -1,11 +1,12 @@
import logging
from datetime import datetime
from fastapi import HTTPException
from chalicelib.utils import pg_client, helper
from chalicelib.utils.TimeUTC import TimeUTC
from schemas import AssistStatsSessionsRequest, AssistStatsSessionsResponse, AssistStatsTopMembersResponse
logger = logging.getLogger(__name__)
event_type_mapping = {
"sessionsAssisted": "assist",
"assistDuration": "assist",
@ -17,12 +18,12 @@ event_type_mapping = {
def insert_aggregated_data():
try:
logging.info("Assist Stats: Inserting aggregated data")
end_timestamp = int(datetime.timestamp(datetime.now())) * 1000
end_timestamp = TimeUTC.now()
start_timestamp = __last_run_end_timestamp_from_aggregates()
if start_timestamp is None: # first run
logging.info("Assist Stats: First run, inserting data for last 7 days")
start_timestamp = end_timestamp - (7 * 24 * 60 * 60 * 1000)
start_timestamp = end_timestamp - TimeUTC.MS_WEEK
offset = 0
chunk_size = 1000
@ -103,9 +104,8 @@ def __last_run_end_timestamp_from_aggregates():
result = cur.fetchone()
last_run_time = result['last_run_time'] if result else None
if last_run_time is None: # first run handle all data
sql = "SELECT MIN(timestamp) as last_timestamp FROM assist_events;"
with pg_client.PostgresClient() as cur:
if last_run_time is None: # first run handle all data
sql = "SELECT MIN(timestamp) as last_timestamp FROM assist_events;"
cur.execute(sql)
result = cur.fetchone()
last_run_time = result['last_timestamp'] if result else None

View file

@ -1,10 +1,10 @@
import ast
import logging
from typing import List, Union
import schemas
from chalicelib.core import events, metadata, projects, performance_event, metrics
from chalicelib.core import events, metadata, projects, performance_event, metrics, sessions_legacy
from chalicelib.utils import pg_client, helper, metrics_helper, ch_client, exp_ch_helper
import logging
logger = logging.getLogger(__name__)
SESSION_PROJECTION_COLS_CH = """\
@ -434,7 +434,6 @@ def search_table_of_individual_issues(data: schemas.SessionsSearchPayloadSchema,
full_args["issues_limit"] = data.limit
full_args["issues_limit_s"] = (data.page - 1) * data.limit
full_args["issues_limit_e"] = data.page * data.limit
print(full_args)
main_query = cur.format(f"""SELECT issues.type AS name,
issues.context_string AS value,
COUNT(DISTINCT raw_sessions.session_id) AS session_count,
@ -1391,15 +1390,15 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
_value_conditions_not.append(_p)
value_conditions_not.append(p)
del _value_conditions_not
sequence_conditions += value_conditions_not
# sequence_conditions += value_conditions_not
events_extra_join += f"""LEFT ANTI JOIN ( SELECT DISTINCT session_id
FROM {MAIN_EVENTS_TABLE} AS main
WHERE {' AND '.join(__events_where_basic)}
AND ({' OR '.join(value_conditions_not)})) AS sub USING(session_id)"""
# if has_values:
# events_conditions = [c for c in list(set(sequence_conditions))]
# events_conditions_where.append(f"({' OR '.join(events_conditions)})")
if has_values and len(sequence_conditions) > 0:
events_conditions = [c for c in list(set(sequence_conditions))]
events_conditions_where.append(f"({' OR '.join(events_conditions)})")
events_query_part = f"""SELECT main.session_id,
MIN(main.datetime) AS first_event_ts,
@ -1665,3 +1664,29 @@ def check_recording_status(project_id: int) -> dict:
"recordingStatus": row["recording_status"],
"sessionsCount": row["sessions_count"]
}
# TODO: rewrite this function to use ClickHouse
def search_sessions_by_ids(project_id: int, session_ids: list, sort_by: str = 'session_id',
                           ascending: bool = False) -> dict:
    """Fetch the given sessions from PostgreSQL, including project metadata columns.

    :param project_id: project whose sessions are queried.
    :param session_ids: session IDs to look up; None or empty short-circuits
        to an empty result without touching the database.
    :param sort_by: column name used in ORDER BY. NOTE(review): interpolated
        directly into the SQL string (not a bound parameter) — callers must
        pass a trusted/whitelisted column name; confirm no user input reaches this.
    :param ascending: sort direction; False (default) sorts descending.
    :return: dict with "total" (number of rows) and "sessions"
        (rows converted to camelCase key names).
    """
    if session_ids is None or len(session_ids) == 0:
        return {"total": 0, "sessions": []}
    with pg_client.PostgresClient() as cur:
        # Projects may define extra metadata keys; each maps to a
        # metadata_<index> column on public.sessions.
        meta_keys = metadata.get(project_id=project_id)
        params = {"project_id": project_id, "session_ids": tuple(session_ids)}
        order_direction = 'ASC' if ascending else 'DESC'
        # Base projection is shared with the legacy sessions module; metadata
        # columns are appended only when the project defines metadata keys.
        main_query = cur.mogrify(f"""SELECT {sessions_legacy.SESSION_PROJECTION_BASE_COLS}
                                            {"," if len(meta_keys) > 0 else ""}{",".join([f'metadata_{m["index"]}' for m in meta_keys])}
                                     FROM public.sessions AS s
                                     WHERE project_id=%(project_id)s
                                       AND session_id IN %(session_ids)s
                                     ORDER BY {sort_by} {order_direction};""", params)
        cur.execute(main_query)
        rows = cur.fetchall()
        if len(meta_keys) > 0:
            # Fold the flat metadata_<index> columns into a nested
            # "metadata" dict keyed by the project-defined key names.
            for s in rows:
                s["metadata"] = {}
                for m in meta_keys:
                    s["metadata"][m["key"]] = s.pop(f'metadata_{m["index"]}')
        return {"total": len(rows), "sessions": helper.list_to_camel_case(rows)}

View file

@ -93,4 +93,5 @@ rm -rf ./schemas/overrides.py
rm -rf ./schemas/schemas.py
rm -rf ./schemas/transformers_validators.py
rm -rf ./orpy.py
rm -rf ./chalicelib/core/usability_testing/
rm -rf ./chalicelib/core/usability_testing/
rm -rf ./chalicelib/utils/db_request_handler.py

View file

@ -3,6 +3,8 @@ from apscheduler.triggers.interval import IntervalTrigger
from chalicelib.utils import events_queue
from chalicelib.core import assist_stats
from decouple import config
async def pg_events_queue() -> None:
events_queue.global_queue.force_flush()
@ -12,8 +14,14 @@ async def assist_events_aggregates_cron() -> None:
assist_stats.insert_aggregated_data()
ee_cron_jobs = [
{"func": pg_events_queue, "trigger": IntervalTrigger(minutes=5), "misfire_grace_time": 20, "max_instances": 1},
{"func": assist_events_aggregates_cron,
"trigger": IntervalTrigger(hours=1, start_date="2023-04-01 0:0:0", jitter=10), }
# SINGLE_CRONS are crons that will be run the crons-service, they are a singleton crons
SINGLE_CRONS = [{"func": assist_events_aggregates_cron,
"trigger": IntervalTrigger(hours=1, start_date="2023-04-01 0:0:0", jitter=10)}]
# cron_jobs is the list of crons to run in main API service (so you will have as many runs as the number of instances of the API)
cron_jobs = [
{"func": pg_events_queue, "trigger": IntervalTrigger(minutes=5), "misfire_grace_time": 20, "max_instances": 1}
]
if config("LOCAL_CRONS", default=False, cast=bool):
cron_jobs += SINGLE_CRONS

View file

@ -22,5 +22,5 @@ MINIO_ACCESS_KEY = ''
MINIO_SECRET_KEY = ''
# APP and TRACKER VERSIONS
VERSION = 1.14.0
TRACKER_VERSION = '9.0.0'
VERSION = 1.16.1
TRACKER_VERSION = '11.0.1'

View file

@ -5,9 +5,9 @@ original_env_file="$1"
# Check if the original env file exists and is not empty
if [ ! -s "$original_env_file" ]; then
echo "Error: The original env file is empty or does not exist."
echo "Usage: $0 /path/to/original.env"
exit 1
echo "Error: The original env file is empty or does not exist."
echo "Usage: $0 /path/to/original.env"
exit 1
fi
new_env_file="./common.env"
@ -15,99 +15,111 @@ temp_env_file=$(mktemp)
# Function to merge environment variables from original to new env file
function merge_envs() {
while IFS='=' read -r key value; do
# Skip the line if the key is COMMON_VERSION
case "$key" in
COMMON_VERSION)
original_version=$(echo "$value" | xargs)
continue
;;
COMMON_PG_PASSWORD)
pgpassword=$value
;;
POSTGRES_VERSION | REDIS_VERSION | MINIO_VERSION)
# Don't update db versions automatically.
continue
;;
esac
while IFS='=' read -r key value; do
# Skip the line if the key is COMMON_VERSION
case "$key" in
COMMON_VERSION)
original_version=$(echo "$value" | xargs)
continue
;;
COMMON_PG_PASSWORD)
pgpassword=$(echo $value | xargs)
;;
POSTGRES_VERSION | REDIS_VERSION | MINIO_VERSION)
# Don't update db versions automatically.
continue
;;
esac
# Remove any existing entry from the new env file and add the new value
grep -v "^$key=" "$new_env_file" >"$temp_env_file"
mv "$temp_env_file" "$new_env_file"
echo "$key=$value" >>"$new_env_file"
done <"$original_env_file"
# Remove any existing entry from the new env file and add the new value
grep -v "^$key=" "$new_env_file" >"$temp_env_file"
mv "$temp_env_file" "$new_env_file"
echo "$key=$value" >>"$new_env_file"
done <"$original_env_file"
}
# Function to normalize version numbers for comparison
function normalise_version {
echo "$1" | awk -F. '{ printf("%03d%03d%03d\n", $1, $2, $3); }'
echo "$1" | awk -F. '{ printf("%03d%03d%03d\n", $1, $2, $3); }'
}
# Function to log messages
function log_message() {
echo "$@" >&2
echo "$@" >&2
}
# Function to create migration versions based on the current and previous application versions
function create_migration_versions() {
cd "${SCHEMA_DIR:-/opt/openreplay/openreplay/scripts/schema}" || {
log_message "not able to cd $SCHEMA_DIR"
exit 100
}
SCHEMA_DIR="../schema/"
cd $SCHEMA_DIR || {
log_message "not able to cd $SCHEMA_DIR"
exit 100
}
db=postgresql
# List all version directories excluding 'create' directory
all_versions=($(find db/init_dbs/$db -maxdepth 1 -type d -exec basename {} \; | grep -v create))
db=postgresql
# List all version directories excluding 'create' directory
all_versions=($(find db/init_dbs/$db -maxdepth 1 -type d -exec basename {} \; | grep -v create))
# Normalize the previous application version for comparison
PREVIOUS_APP_VERSION_NORMALIZED=$(normalise_version "${PREVIOUS_APP_VERSION}")
# Normalize the previous application version for comparison
PREVIOUS_APP_VERSION_NORMALIZED=$(normalise_version "${PREVIOUS_APP_VERSION}")
migration_versions=()
for ver in "${all_versions[@]}"; do
if [[ $(normalise_version "$ver") > "$PREVIOUS_APP_VERSION_NORMALIZED" ]]; then
migration_versions+=("$ver")
fi
done
migration_versions=()
for ver in "${all_versions[@]}"; do
if [[ $(normalise_version "$ver") > "$PREVIOUS_APP_VERSION_NORMALIZED" ]]; then
migration_versions+=("$ver")
fi
done
# Join migration versions into a single string separated by commas
joined_migration_versions=$(
IFS=,
echo "${migration_versions[*]}"
)
# Join migration versions into a single string separated by commas
joined_migration_versions=$(
IFS=,
echo "${migration_versions[*]}"
)
# Return to the previous directory
cd - >/dev/null || {
log_message "not able to cd back"
exit 100
}
# Return to the previous directory
cd - >/dev/null || {
log_message "not able to cd back"
exit 100
}
log_message "output: $joined_migration_versions"
echo "$joined_migration_versions"
log_message "output: $joined_migration_versions"
echo "$joined_migration_versions"
}
export SCHEMA_DIR="$(readlink -f ../schema/)"
echo $SCHEMA_DIR
# Function to perform migration
function migrate() {
# Set schema directory and previous application version
export SCHEMA_DIR="../schema/"
export PREVIOUS_APP_VERSION=${original_version#v}
# Set schema directory and previous application version
export PREVIOUS_APP_VERSION=${original_version#v}
# Create migration versions array
IFS=',' read -ra joined_migration_versions <<<"$(create_migration_versions)"
# Check if there are versions to migrate
[[ ${#joined_migration_versions[@]} -eq 0 ]] && {
echo "Nothing to migrate"
return
}
# Loop through versions and prepare Docker run commands
for ver in "${joined_migration_versions[@]}"; do
echo "$ver"
"docker run --rm --network openreplay-net \
--name pgmigrate -e 'PGHOST=postgres' -e 'PGPORT=5432' \
-e 'PGDATABASE=postgres' -e 'PGUSER=postgres' -e 'PGPASSWORD=$pgpassword' \
-v /opt/data/:$SCHEMA_DIR postgres psql -f /opt/data/schema/db/init_dbs/postgresql/$ver/$ver.sql"
done
# Create migration versions array
IFS=',' read -ra joined_migration_versions <<<"$(create_migration_versions)"
# Check if there are versions to migrate
[[ ${#joined_migration_versions[@]} -eq 0 ]] && {
echo "Nothing to migrate"
return
}
# Loop through versions and prepare Docker run commands
for ver in "${joined_migration_versions[@]}"; do
echo "$ver"
docker run --rm --network docker-compose_opereplay-net \
--name pgmigrate -e PGHOST=postgres -e PGPORT=5432 \
-e PGDATABASE=postgres -e PGUSER=postgres -e PGPASSWORD=$pgpassword \
-v $SCHEMA_DIR:/opt/data/ postgres psql -f /opt/data/db/init_dbs/postgresql/$ver/$ver.sql
done
}
# Merge environment variables and perform migration
merge_envs
migrate
# Load variables from common.env into the current shell's environment
set -a # automatically export all variables
source common.env
set +a
# Use the `envsubst` command to substitute the shell environment variables into reference_var.env and output to a combined .env
find ./ -type f \( -iname "*.env" -o -iname "docker-compose.yaml" \) ! -name "common.env" -exec /bin/bash -c 'file="{}";cp "$file" "$file.bak"; envsubst < "$file.bak" > "$file"; rm "$file.bak"' \;
sudo -E docker-compose up -d

View file

@ -15,10 +15,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.1
version: 0.1.3
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
AppVersion: "v1.16.0"
AppVersion: "v1.16.1"

View file

@ -15,10 +15,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.7
version: 0.1.11
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
AppVersion: "v1.16.0"
AppVersion: "v1.16.4"

View file

@ -15,10 +15,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.10
version: 0.1.11
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
AppVersion: "v1.16.0"
AppVersion: "v1.16.1"