-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathdiary_handlers.py
142 lines (114 loc) · 4.92 KB
/
diary_handlers.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
from flask import request, jsonify
from diary_service import create_diary_with_prediction, update_diary_with_prediction, generate_activity_recommendation, get_diaries_by_user, delete_diary
import tensorflow as tf
import numpy as np
# Load the machine learning model
# NOTE(review): these paths are relative to the process working directory —
# presumably the app is always launched from the project root; confirm.
MODEL_PATH = 'model_lstm_new_2.h5'
VOCAB_PATH = 'indonesian_vocab.txt'
# Fixed input width expected by the LSTM; preprocess_text pads/truncates to this.
MAX_SEQ_LENGTH = 20
# Loading happens once at import time so every request reuses the same model.
print("Loading ML model...")
ml_model = tf.keras.models.load_model(MODEL_PATH)
print("ML model loaded successfully.")
# Load vocabulary
def load_vocabulary(vocab_path):
    """Load a newline-delimited vocabulary file.

    Each line holds one word; the word's 0-based line position becomes its
    integer token id. Returns a ``{word: id}`` dict.
    """
    with open(vocab_path, 'r', encoding='utf-8') as vocab_file:
        words = vocab_file.read().splitlines()
    return dict((word, position) for position, word in enumerate(words))
vocab = load_vocabulary(VOCAB_PATH)
# Tokenize and preprocess text
def preprocess_text(text, max_seq_length, vocab, unknown_token="[UNK]"):
    """Convert *text* into a fixed-width, left-padded token-id sequence.

    The text is lower-cased and whitespace-split; each word is mapped through
    *vocab*, with out-of-vocabulary words mapped to the id of *unknown_token*.
    The sequence is left-padded with 0 when short and truncated to the first
    ``max_seq_length`` tokens when long, so the result is always exactly
    ``max_seq_length`` wide.

    Returns a numpy array of shape ``(1, max_seq_length)`` — a single-sample
    batch ready for ``model.predict``.
    """
    # Resolve the unknown-token id once. Fall back to 0 (the pad id) so a
    # vocabulary that lacks unknown_token cannot inject None into the
    # sequence (which would produce an object-dtype array and crash predict).
    unk_id = vocab.get(unknown_token, 0)
    tokens = [vocab.get(word, unk_id) for word in text.lower().split()]
    # Left-pad when short; truncate when long. max(0, ...) makes the
    # no-padding case explicit instead of relying on [0] * negative == [].
    padding = [0] * max(0, max_seq_length - len(tokens))
    return np.array([padding + tokens[:max_seq_length]])
def get_diaries_handler():
    """Handle listing every diary that belongs to one user.

    Reads ``userId`` from the query string. Responds 400 when it is missing,
    404 when the user has no diaries, 200 with the diary list otherwise,
    and 500 on any unexpected failure.
    """
    try:
        requested_user = request.args.get("userId")
        if not requested_user:
            return jsonify({"error": "User ID is required"}), 400

        user_diaries = get_diaries_by_user(requested_user)
        if not user_diaries:
            return jsonify({"message": "No diaries found for the user"}), 404

        payload = {"userId": requested_user, "diaries": user_diaries}
        return jsonify(payload), 200
    except Exception as exc:
        return jsonify({"error": f"Failed to fetch diaries: {str(exc)}"}), 500
# Mapping from model output class index to emotion label (labels are
# Indonesian, matching the training data).
_EMOTION_MAPPING = {
    0: "sedih",
    1: "senang",
    2: "love",
    3: "marah",
    4: "takut",
    5: "terkejut",
}

# Emotions treated as positive when deriving the coarse mood category.
_GOOD_MOOD_EMOTIONS = ("senang", "love")


def _analyze_content(content):
    """Run the emotion-prediction pipeline on diary *content*.

    Preprocesses the text, predicts with the module-level model, maps the
    argmax class to an emotion label (``"Unknown"`` for unmapped classes),
    derives the mood category, and generates an activity recommendation.
    Shared by the create and update handlers so the two pipelines cannot
    drift apart.

    Returns a ``(predicted_emotion, mood_category, activity_recommendation)``
    tuple.
    """
    preprocessed_input = preprocess_text(content, MAX_SEQ_LENGTH, vocab)
    predictions = ml_model.predict(preprocessed_input)
    predicted_class = int(np.argmax(predictions))  # winning class index
    predicted_emotion = _EMOTION_MAPPING.get(predicted_class, "Unknown")
    mood_category = "Good Mood" if predicted_emotion in _GOOD_MOOD_EMOTIONS else "Bad Mood"
    activity_recommendation = generate_activity_recommendation(predicted_emotion)
    return predicted_emotion, mood_category, activity_recommendation


# Create a diary with ML prediction and activity recommendation
def create_diary_handler():
    """Handle creating a diary entry with a predicted emotion and mood.

    Expects a JSON body with a non-empty ``content`` field. Responds 400 when
    content is missing, 201 with the new diary id plus prediction details on
    success, and 500 on any unexpected failure.
    """
    try:
        data = request.json
        content = data.get("content", "")
        if not content:
            return jsonify({"error": "Content is required"}), 400

        predicted_emotion, mood_category, activity_recommendation = _analyze_content(content)

        # Save the diary with the predicted class and recommendation
        diary_id = create_diary_with_prediction(data, predicted_emotion, mood_category)

        return jsonify({
            "diaryId": diary_id,
            "predictedEmotion": predicted_emotion,
            "moodCategory": mood_category,
            "activityRecommendation": activity_recommendation
        }), 201
    except Exception as e:
        return jsonify({"error": str(e)}), 500


# Update a diary with ML prediction and activity recommendation
def update_diary_handler(id):
    """Handle updating diary *id*, re-running the emotion prediction.

    Expects a JSON body with a non-empty ``content`` field. Responds 400 when
    content is missing, 404 when the diary does not exist or the update fails,
    200 with prediction details on success, and 500 on any unexpected failure.
    """
    try:
        data = request.json
        content = data.get("content", "")
        if not content:
            return jsonify({"error": "Content is required"}), 400

        predicted_emotion, mood_category, activity_recommendation = _analyze_content(content)

        # Update the diary with the predicted class and recommendation
        updated = update_diary_with_prediction(id, data, predicted_emotion, mood_category)
        if not updated:
            return jsonify({"error": "Diary not found or update failed"}), 404

        return jsonify({
            "message": "Diary updated successfully",
            "predictedEmotion": predicted_emotion,
            "moodCategory": mood_category,
            "activityRecommendation": activity_recommendation
        }), 200
    except Exception as e:
        return jsonify({"error": str(e)}), 500
def delete_diary_handler(id):
    """Handle deleting diary *id*.

    Delegates to the service layer and passes its response straight through;
    responds 500 with an error message if the service raises.
    """
    try:
        return delete_diary(id)
    except Exception as exc:
        return jsonify({"error": f"Failed to delete diary: {str(exc)}"}), 500