#!/usr/bin/env python
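"""Download images from Reddit's SFW Porn Network subreddits and build a
rotating GNOME desktop background slideshow out of them."""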
import json
import urllib2
import os
import os.path
import urllib
from urlparse import urlparse, urljoin
from optparse import OptionParser
from bs4 import BeautifulSoup
from slideshow import make_xml
REDDIT_DIR = os.path.expanduser('~/.redditbackgrounds')
SUBREDDIT_URL = "http://www.reddit.com/r/"
USER_AGENT = 'A Reddit SFW-porn scraper (https://github.com/michaelsergio/Animated-Background)'
VALID_EXTS = ['.jpg', '.png', '.gif']
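# Preset multireddits: each key expands to several related subreddits.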
SFW_PORN_NETWORK_COMPLEX = {
"elemental": "earthporn+waterporn+skyporn+fireporn+destructionporn+spaceporn",
"synthetic": "CityPorn+VillagePorn+AbandonedPorn+InfrastructurePorn+MachinePorn+MilitaryPorn" ,
"organic": "AnimalPorn+BotanicalPorn+HumanPorn+AdrenalinePorn",
"aesthetic": "DesignPorn+AlbumArtPorn+MoviePosterPorn+AdPorn+GeekPorn+RoomPorn",
"scholastic": "HistoryPorn+MapPorn+BookPorn+NewsPorn+QuotesPorn"
}
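# Single networks; parse_networks appends "porn" to each (earth -> earthporn).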
SFW_PORN_NETWORK = [
'earth', 'water', 'sky', 'space', 'fire', 'destruction',
'city', 'village', 'abandoned', 'infrastructure', 'machine', 'military',
'cemetery', 'architecture', 'car', 'gun',
'animal', 'botanical', 'human', 'adrenaline', 'climbing', 'culinary',
'dessert', 'agriculture',
'design', 'albumart', 'movieposter', 'ad', 'geek', 'room', 'instrument',
'macro', 'art', 'fractal', 'exposure',
'history', 'map', 'book', 'news', 'quotes'
]
def download_images(url):
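    """Fetch the subreddit JSON listing at `url`, download every linked
    image into REDDIT_DIR, and return the list of local file paths."""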
images = []
if not os.path.exists(REDDIT_DIR):
os.makedirs(REDDIT_DIR)
try:
contents = urllib.urlopen(url).read()
listing = json.loads(contents)
        if 'error' in listing:
            errno = listing['error']
            v_log("Error: %s" % errno, failure=True)
            if errno == 429:
                v_log('Too many requests. Wait 30 sec and try again!',
                      failure=True)
            # an error response carries no 'data' listing, so stop here
            exit()
picture_listings = listing['data']['children']
for pic in picture_listings:
data = pic['data']
# prevent filename length errors by limiting title size
title = data['title'][:124]
# remove any illegal characters from the title string
title = title.replace('/', '-')
pic_url = data['url']
ext = os.path.splitext(pic_url)[1]
valid = ext and ext in VALID_EXTS
            if not valid:
                # not a direct image link; scan the page's HTML for one
                pic_url = get_largest_image_from_html(pic_url)
                if pic_url is not None:
                    ext = os.path.splitext(pic_url)[1]
                    valid = ext and ext in VALID_EXTS
                else:
                    valid = False
            path = os.path.join(REDDIT_DIR, title + ext)
if not os.path.exists(path) and valid:
v_log("Creating '%s' from %s" % (title + ext, pic_url))
try:
urllib.urlretrieve(pic_url, path)
images.append(path)
except IOError as ex:
v_log("Unable to read %s" % pic_url, failure=True)
v_log("Reason: {0}".format(ex), failure=True)
else:
v_log("Ignoring %s" % pic_url, failure= not valid)
if valid:
images.append(path)
except IOError as ex:
v_log('IO Error: {0}'.format(ex), failure=True)
return images
def v_log(string, failure=False):
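    """Print `string` according to the command line flags: failures only
    when --failures is set, everything when --verbose is set."""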
if OPTIONS.failures:
if failure:
print(string)
else:
if OPTIONS.verbose:
print(string)
class MyOpener(urllib.FancyURLopener):
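    """URL opener that identifies itself with the custom USER_AGENT."""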
version = USER_AGENT
def get_largest_image_from_html(url):
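    """Scrape `url` as HTML and return the absolute URL of the largest
    <img> (by declared width * height), or None if none is found."""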
# assuming html
imgs = []
try:
html = urllib.urlopen(url).read()
        soup = BeautifulSoup(html, 'html.parser')
# return the largest image
tags = soup(images_with_width_and_height)
imgs = [int(t.attrs['width']) * int(t.attrs['height']) for t in tags]
if imgs:
idx = imgs.index(max(imgs))
src = tags[idx]['src']
            # if src is relative like /hello/world, join it with the page
            # URL to make it absolute: http://example.com/hello/world
            src_parse = urlparse(src)
            if src_parse.netloc == '':
                img_url = urljoin(url, src)
            else:
                img_url = src
            return img_url
        else:
            return None
except IOError as ex:
v_log("Unable to read %s" % url, failure=True)
v_log("Reason: {0}".format(ex), failure=True)
def images_with_width_and_height(tag):
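    """BeautifulSoup filter: match <img> tags that declare src, width,
    and height attributes."""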
    return (tag.name == 'img' and
            tag.has_attr('width') and
            tag.has_attr('height') and
            tag.has_attr('src'))
def parse_networks(networks_given):
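    """Turn a '+'-separated network string into a list of Reddit JSON
    listing URLs; literal http URLs are passed through unchanged."""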
urls = []
reddit_networks = []
for network in networks_given.split('+'):
if network.lower() in SFW_PORN_NETWORK_COMPLEX:
            reddit_networks.append(SFW_PORN_NETWORK_COMPLEX[network.lower()])
elif network in SFW_PORN_NETWORK:
reddit_networks.append("%sporn" % network)
elif network.startswith('http'):
urls.append(network)
else:
# New SFWPorn name?
# Let's try it out
reddit_networks.append("%sporn" % network)
urls.append("%s%s.json" % (SUBREDDIT_URL, '+'.join(reddit_networks)))
return urls
def print_network_list():
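    """Print every known network name, one per line, sorted."""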
    all_networks = list(SFW_PORN_NETWORK_COMPLEX.keys()) + SFW_PORN_NETWORK
    print('\n'.join(sorted(all_networks)))
if __name__ == '__main__':
# Check for command line flags
PARSER = OptionParser()
PARSER.add_option("-v", "--verbose", action="store_true",
help="explain what is being done")
PARSER.add_option("-f", "--failures", action="store_true",
help="log only failures")
PARSER.add_option("-d", "--delay", default="60",
help="sets the delay in between the slideshow (in seconds)")
PARSER.add_option("-n", "--networks", default="space",
help="The networks to use. Use + for multiple (space+earth)")
PARSER.add_option("-l", "--list", action="store_true",
help="Lists all know networks")
(OPTIONS, ARGS) = PARSER.parse_args()
    # Set a custom user agent to get past urllib bans
OPENER = MyOpener()
urllib._urlopener = OPENER
urllib2._urlopener = OPENER
if OPTIONS.list:
print_network_list()
exit()
# retrieve all images
urls_to_use = parse_networks(OPTIONS.networks)
all_images = []
for url in urls_to_use:
all_images += download_images(url)
xml_name = "slideshow.xml"
xml_path = os.path.join(REDDIT_DIR, xml_name)
xml_file = open(xml_path, 'w')
make_xml(all_images, int(OPTIONS.delay)).write(xml_file)
# make an OS call to run the gsettings command to set the background
os.system('gsettings set org.gnome.desktop.background picture-uri file://%s'
% xml_path)
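# Example usage (assuming the script is executable):
#   ./animated_background.py --verbose --networks space+earth --delay 120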