Modified for multi-user tracking
This commit is contained in:
@@ -1,39 +1,57 @@
|
||||
#!/bin/bash
#
# Multi-user asset tracker: for every user ID listed in user_ids.conf,
# fetch that user's page and append rank/diamonds/total-assets figures
# to a per-user CSV file under $OUTPUT_DIR.

# Source the configuration file.
# NOTE(review): presumably user_ids.conf defines the USER_IDS array used
# by the driver loop below — confirm against that file's contents.
if [ -f ./user_ids.conf ]; then
  source ./user_ids.conf
else
  echo "Warning: ./user_ids.conf not found; no users will be processed." >&2
fi

# URL template for the webpage to extract the data from; the numeric
# user ID is appended per user inside process_user.
URL_TEMPLATE="https://www.prismatic-imperium.com/user_page.php?user="

# Directory where the per-user CSV files will be stored
OUTPUT_DIR="/home/eric/asset_tracker"

# Create output directory if it doesn't exist
mkdir -p "$OUTPUT_DIR"
|
||||
#######################################
# Process a single user ID: fetch the user's page, extract the three
# asset figures, and append them (timestamped) to that user's CSV.
# Globals:
#   URL_TEMPLATE (read), OUTPUT_DIR (read)
# Arguments:
#   $1 - numeric user ID
# Outputs:
#   Appends one row to ${OUTPUT_DIR}/assets_data_<ID>.csv; progress
#   messages to stdout, extraction errors to stderr.
#######################################
process_user() {
  local user_id="$1"
  local url="${URL_TEMPLATE}${user_id}"
  local csv_file="${OUTPUT_DIR}/assets_data_${user_id}.csv"

  # Create the per-user CSV with headers on first run.
  if [ ! -f "$csv_file" ]; then
    echo "CSV file does not exist. Creating it with headers."
    echo "date,rank,diamonds,total_assets" > "$csv_file"
  fi

  # Fetch the webpage content and extract all the values within the
  # user_assets_number span (one match per output line).
  local values
  values=$(curl -s "$url" | grep -oP '(?<=<span class="user_assets_number">)[^<]+')

  # Convert values to an array (this splits the values by newline).
  local value_array
  IFS=$'\n' read -r -d '' -a value_array <<< "$values"

  # Check if we have exactly 3 values (rank, diamonds, total_assets).
  if [ ${#value_array[@]} -eq 3 ]; then
    # Remove thousands-separator commas so the CSV holds plain numbers.
    local user_rank liquid_assets total_assets current_datetime
    user_rank=$(echo "${value_array[0]}" | tr -d ',')
    liquid_assets=$(echo "${value_array[1]}" | tr -d ',')
    total_assets=$(echo "${value_array[2]}" | tr -d ',')

    # Get the current date and time.
    current_datetime=$(date "+%Y-%m-%d %H:%M:%S")

    # Append the values to the CSV file.
    echo "$current_datetime,$user_rank,$liquid_assets,$total_assets" >> "$csv_file"
  else
    # Error handling: values were not extracted properly for this user.
    echo "Error: Could not extract all values or incorrect number of values for user $user_id." >&2
  fi
}
|
||||
|
||||
# Walk the configured ID list and collect asset data for each user.
for uid in "${USER_IDS[@]}"; do
  echo "Processing user ID: $uid"
  process_user "$uid"
done

echo "All users processed."
|
||||
|
||||
Reference in New Issue
Block a user