# main.py
import base64
import csv
import json
import os
import re
import time

import requests
from twython import Twython

# Twitter credentials, read from environment variables.
twitterAppKey = os.environ.get('twitterAppKey', '')
twitterAppSecret = os.environ.get('twitterAppSecret', '')
twitterOAuthToken = os.environ.get('twitterOAuthToken', '')
twitterOAuthTokenSecret = os.environ.get('twitterOAuthTokenSecret', '')

# Reddit credentials, read from environment variables.
appId = os.environ.get('appId', '')
appSecret = os.environ.get('appSecret', '')
username = os.environ.get('username', '')
password = os.environ.get('password', '')
auth = requests.auth.HTTPBasicAuth(appId, appSecret)

auth_base_url = 'https://www.reddit.com/'
api_base_url = 'https://oauth.reddit.com'
userAgent = 'Subreddits Trends by GordonTheDeveloper'
subReddits = ['wallstreetbets', 'StockMarket', 'stocks', 'investing', 'options']
stock_list_endpoint = 'https://www.alphavantage.co/query?function=LISTING_STATUS&apikey='
alphaVantageApiKey = os.environ.get('alphaVantageApiKey', '')

# Analysis parameters.
postType = 'top'              # which subreddit listing to pull
postLimit = 100               # posts to fetch per subreddit
mentionCountThreshold = 10    # minimum distinct authors for a symbol to be reported
upvoteRatioThreshold = 0.6    # skip posts with a lower upvote ratio
scoreThreshold = 10           # skip posts with a lower score
commentScoreThreshold = 1     # skip comments with a lower score
commentSearchDepth = 6        # levels of comment replies to traverse
debug = False

def reddit_trends_analysis(event, context):
    """Triggered by a message on a Cloud Pub/Sub topic.
    Args:
        event (dict): Event payload.
        context (google.cloud.functions.Context): Metadata for the event.
    """
    # pubsub_message = base64.b64decode(event['data']).decode('utf-8')
    # print(pubsub_message)
    commonEnglishWords = []
    symbolMentionMap = {}  # symbol -> set of authors who mentioned it
    symbolsSet = set()

    def addSymbol(symbol):
        symbolsSet.add(symbol.upper())

    def readCommonWordsFromFile(fileName):
        words = []
        with open(fileName + '.csv') as csvfile:
            reader = csv.reader(csvfile, delimiter=',')
            for row in reader:
                for word in row:
                    words.append(word)
        return words

    def extractStockSymbolFromCSV(fileName):
        symbols = set()
        with open(fileName + '.csv') as csvfile:
            reader = csv.reader(csvfile, delimiter=',')
            for row in reader:
                if len(row) > 0:
                    symbol = row[0]
                    symbol = symbol.split('-')[0]  # drop share-class suffixes, e.g. 'BRK-B' -> 'BRK'
                    symbols.add(symbol)
        # print(symbols)
        return symbols

    def getStockList(apiKey):
        symbols = set()
        url = stock_list_endpoint + apiKey
        try:
            response = requests.get(url)
            if response.status_code == 200:
                reader = csv.reader(response.text.splitlines(), delimiter=',')
                next(reader, None)  # skip the header row of column titles
                for row in reader:
                    if len(row) > 0:
                        symbol = row[0]
                        symbol = symbol.split('-')[0]
                        symbols.add(symbol)
            else:
                print(response.text)
        except Exception as e:
            print(str(e))
        print('no. of symbols from Alpha Vantage: ' + str(len(symbols)) + '\n')
        return symbols
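
    # getStockList expects the LISTING_STATUS endpoint to return CSV rows
    # shaped roughly like:
    #   symbol,name,exchange,assetType,ipoDate,delistingDate,status
    # Only column 0 (the ticker symbol) is used here.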

    def filterWordSetForStockSearch(unfilteredArray):
        filteredWordSet = set()
        if len(unfilteredArray) < 2:
            return filteredWordSet  # ignore content that is only a single word
        for word in unfilteredArray:
            trimmedWord = word.replace('$', '')  # trim the $ prefix
            if len(trimmedWord) > 5 or len(word) < 2:
                # Ignore words too long to be a stock symbol; a single letter
                # only counts as a symbol when prefixed with $, e.g. $U.
                continue
            if any(ch.isdigit() for ch in trimmedWord):
                continue  # ignore numbers
            if "'" in trimmedWord:
                continue  # ignore contractions
            if trimmedWord.lower() != trimmedWord and trimmedWord.upper() != trimmedWord:
                continue  # only consider words that are all upper case or all lower case
            if trimmedWord in filteredWordSet:
                continue
            if word[0] != '$' and trimmedWord in commonEnglishWords:
                continue  # skip common English words unless explicitly tagged with $
            filteredWordSet.add(trimmedWord.upper())
        return filteredWordSet
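
    # Illustrative example: for the words of "I bought $GME and AMC at 10",
    # filterWordSetForStockSearch returns {'GME', 'AMC'}: '$GME' survives via
    # its $ prefix, 'AMC' because it is short and all upper case, while 'I'
    # (too short), 'bought' (too long) and '10' (digits) are dropped --
    # assuming 'and'/'at' appear in commonWords.csv and 'AMC' does not.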

    def convertContentToWordArray(content):
        # Tokenize on runs of word characters, apostrophes and $ signs.
        wordArray = re.findall(r"[\w'$]+", content)
        return wordArray

    def checkMentionsFromContentSet(author, contentSet, keywordsSet):
        if len(contentSet) == 0:
            return
        # print(author + ": " + ascii(' / '.join(contentSet)))
        for word in keywordsSet:
            if word in contentSet:
                if word not in symbolMentionMap:
                    symbolMentionMap[word] = set()
                mentionAuthorSet = symbolMentionMap[word]
                mentionAuthorSet.add(author)

    def getAccessToken(username, password):
        try:
            response = requests.post(auth_base_url + 'api/v1/access_token',
                                     data={'grant_type': 'password', 'username': username, 'password': password},
                                     headers={'User-Agent': userAgent},
                                     auth=auth)
            if response.status_code == 200:
                responseJsonObject = response.json()
                return responseJsonObject['token_type'] + " " + responseJsonObject['access_token']
            print('response.status_code: ' + str(response.status_code))
            return None
        except Exception as e:
            print(str(e))
            return None

    def fixJsonString(brokenJsonString):
        # Pad empty-string values and escaped quotes so that the string
        # survives the parsing below; see the example that follows.
        fixedJsonString = brokenJsonString.replace(":\"\"", ":\" \"")
        fixedJsonString = fixedJsonString.replace("\\\"", "\\\" ")
        fixedJsonString = fixedJsonString.replace("\"\"", "")
        fixedJsonString = fixedJsonString.replace(": ,", ":\" \",")
        return fixedJsonString
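
    # For example, fixJsonString('{"selftext":"","title":"a"}') returns
    # '{"selftext":" ","title":"a"}' -- the empty value is padded with a space.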

    def parseComment(commentData, level):
        if 'body' not in commentData:
            return
        commentBody = commentData['body']
        author = commentData['author']
        score = commentData['score']
        if author == 'AutoModerator':
            return
        if score >= commentScoreThreshold:
            filteredWords = filterWordSetForStockSearch(convertContentToWordArray(commentBody))
            checkMentionsFromContentSet(author, filteredWords, symbolsSet)
            if debug:
                indent = '>' * (level + 1)
                print(indent + '(' + author + ')' + ascii(' / '.join(filteredWords)))
        if 'replies' not in commentData:
            return
        replies = commentData['replies']
        if isinstance(replies, str):
            return  # a comment without replies carries an empty string here
        repliesData = replies['data']
        subCommentsArray = repliesData['children']
        for subComment in subCommentsArray:
            subCommentData = subComment['data']
            parseComment(subCommentData, level + 1)
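
    # parseComment walks Reddit's nested comment listing, whose entries look
    # roughly like:
    #   {"kind": "t1", "data": {"body": ..., "author": ..., "score": ...,
    #    "replies": {"data": {"children": [<same shape>, ...]}}}}
    # recursing one level deeper per layer of replies.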

    def getComments(permalink, token, depth, sort):
        payload = {'showmedia': False, 'depth': depth, 'sort': sort}
        url = api_base_url + permalink
        if debug:
            print(url)
        try:
            response = requests.get(url,
                                    params=payload,
                                    headers={'Authorization': token, 'User-Agent': userAgent})
            if response.status_code == 200:
                jsonString = fixJsonString(response.text)
                responseJsonObject = json.loads(jsonString)
                for listing in responseJsonObject:
                    commentsArray = listing['data']['children']
                    for comment in commentsArray:
                        commentData = comment['data']
                        parseComment(commentData, 0)
            else:
                print(response.text)
        except Exception as e:
            print(str(e))

    def getCount(data):
        return data['count']

    def getSubRedditPost(subRedditName, token, listingType, limit):
        payload = {'g': 'GLOBAL', 'limit': limit}
        if listingType == 'top':
            payload['t'] = 'day'  # top posts of the past day
        url = api_base_url + '/r/' + subRedditName + '/' + listingType
        try:
            response = requests.get(url,
                                    params=payload,
                                    headers={'Authorization': token, 'User-Agent': userAgent})
            if response.status_code == 200:
                jsonString = fixJsonString(response.text)
                responseJsonObject = json.loads(jsonString)
                postsArray = responseJsonObject['data']['children']
                for post in postsArray:
                    data = post['data']
                    title = data['title']
                    content = data['selftext']
                    author = data['author']
                    upvoteRatio = data['upvote_ratio']
                    score = data['score']
                    if author == 'AutoModerator':
                        continue
                    if upvoteRatio < upvoteRatioThreshold or score < scoreThreshold:
                        continue
                    filteredWords = filterWordSetForStockSearch(convertContentToWordArray(title + " " + content))
                    checkMentionsFromContentSet(author, filteredWords, symbolsSet)
                    if debug:
                        print('* ' + '(' + author + ')' + ascii(' / '.join(filteredWords)))
                    postUrl = data['permalink']
                    getComments(postUrl, token, commentSearchDepth, 'top')
                    time.sleep(1)  # throttle to respect Reddit's rate limits
            else:
                print(response.text)
        except Exception as e:
            print(str(e))

    def saveResults(minimum):
        result = []
        for key in symbolMentionMap.keys():
            mentionSet = symbolMentionMap[key]
            result.append({'symbol': key, 'count': len(mentionSet)})
        result.sort(key=getCount, reverse=True)
        secondsSinceEpoch = round(time.time())
        with open(str(secondsSinceEpoch) + '.csv', mode='w', newline='') as resultFile:
            resultWriter = csv.writer(resultFile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
            for data in result:
                count = data['count']
                if count > minimum:
                    resultWriter.writerow([data['symbol'], str(count)])
                    print(data['symbol'] + " : " + str(count))
                else:
                    break  # results are sorted, so the rest fall below the minimum

    def printResults(minimum):
        result = []
        for key in symbolMentionMap.keys():
            mentionSet = symbolMentionMap[key]
            result.append({'symbol': key, 'count': len(mentionSet)})
        result.sort(key=getCount, reverse=True)
        for data in result:
            count = data['count']
            if count > minimum:
                print(data['symbol'] + " : " + str(count))
                time.sleep(0.1)
            else:
                break

    def tweetResults(numberOfSymbols):
        result = []
        for key in symbolMentionMap.keys():
            mentionSet = symbolMentionMap[key]
            result.append({'symbol': key, 'count': len(mentionSet)})
        if len(result) >= numberOfSymbols:
            result.sort(key=getCount, reverse=True)
            tweet = str(numberOfSymbols) + ' most mentioned US stocks on Reddit in past 24 hours:\n'
            v = 1
            for data in result:
                count = data['count']
                print(data['symbol'] + " : " + str(count))
                tweet += str(v) + '. $' + data['symbol'] + " : " + str(count) + '\n'
                v += 1
                if v > numberOfSymbols:
                    break
            tweet += "#stock #trading #usstock #stockmarket"
            postTweet(tweet)

    def postTweet(content):
        if len(content) <= 256:  # stay safely under the tweet length limit
            twitter = Twython(twitterAppKey, twitterAppSecret, twitterOAuthToken, twitterOAuthTokenSecret)
            twitter.update_status(status=content)
        else:
            print('content is too long to tweet: ' + str(len(content)))

    # Execution begins here (still inside the Cloud Function body).
    beginTimestamp = time.time()
    print('Stock trend analysis begin: ' + str(beginTimestamp))
    commonEnglishWords = readCommonWordsFromFile('commonWords')
    symbolsSet = getStockList(alphaVantageApiKey)
    if len(symbolsSet) == 0:
        print('Warning: failed to get an updated stock list, falling back to the bundled copy\n')
        symbolsSet = extractStockSymbolFromCSV('listing_status')
    token = getAccessToken(username, password)
    if token is not None:
        for sub in subReddits:
            print('retrieving ' + postType + ' posts from r/' + sub + ', limit: ' + str(postLimit))
            getSubRedditPost(sub, token, postType, postLimit)
        # printResults(mentionCountThreshold)
        tweetResults(10)
    else:
        print('failed to get token')
    endTimestamp = time.time()
    print('Stock trend analysis completed: ' + str(endTimestamp) + ', time taken: ' + str(endTimestamp - beginTimestamp))
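

# A minimal local-run sketch (not part of the deployed Cloud Function). It
# assumes the credential environment variables above are set and that
# commonWords.csv and listing_status.csv are in the working directory.
if __name__ == '__main__':
    # The Pub/Sub payload itself is unused by the analysis, so a stand-in
    # event is enough to exercise the function locally.
    reddit_trends_analysis({'data': base64.b64encode(b'manual run')}, None)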