import spotipy
from spotipy.oauth2 import SpotifyOAuth
import time
import os
import threading
import random
import socket

# from analysis import *
# from fcache.cache import FileCache

# Initialise the Spotify API. Redirect to localhost for login.
sp = spotipy.Spotify(auth_manager=SpotifyOAuth(
    client_id="8f28a9b0d5b54a65bdbc6e8891894623",
    client_secret="c5a20e1680f346158000be6536274703",
    redirect_uri="http://localhost:5000/callback/",
    scope="user-library-read, user-read-currently-playing, user-read-playback-state, user-modify-playback-state"))

clear = lambda: os.system('cls')  # Windows console clear

# One UDP socket per LED controller on the local network.
s1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s3 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s4 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s5 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

host = '192.168.1.180'  # IP of the first Raspberry Pi
host2 = '192.168.1.181'
host3 = '192.168.1.182'
host4 = '192.168.1.183'
host5 = '192.168.1.184'
port = 4210

s1.connect((host, port))
s2.connect((host2, port))
s3.connect((host3, port))
s4.connect((host4, port))
s5.connect((host5, port))

sockets = [s1, s2, s3, s4, s5]


def send_data(data):
    """Broadcast a command string to every LED controller in parallel."""
    threads = list()
    for i in sockets:
        x = threading.Thread(target=send_data_single, args=(i, data,))
        threads.append(x)
        x.start()
    for thread in threads:
        thread.join()
    # senddata = data.encode()
    # s1.send(senddata)
    # s2.send(senddata)


def send_data_single(i, data):
    """Send an encoded command string over a single UDP socket."""
    senddata = data.encode()
    i.send(senddata)
    # print(data)


colors = [0, 255, 100]
animations = [1, 2, 3, 4, 5, 6, 7]


class LEDSYNC():
    def __init__(self):
        self.progress_ms = 0
        self.current_track_data = {}
        self.current_audio_analysis = {}
        self.current_audio_features = {}
        self.track_start_time = 0
        self.current_progress_ms = 0
        self.is_playing = True
        self.delaytime = 0
        self.correcting = 0

        # track_data and time_thread start immediately; the effect threads are
        # started by track_data once the first audio analysis has been fetched.
        thread1 = threading.Thread(target=self.track_data)
        thread2 = threading.Thread(target=self.time_thread)
        self.thread3 = threading.Thread(target=self.brightness)
        self.thread4 = threading.Thread(target=self.sections)
        self.thread5 = threading.Thread(target=self.tatums)
        thread1.start()
        thread2.start()

    def fetch_new_audiodata(self):
        """Fetch the audio analysis and features for the currently playing track."""
        starttime = time.time()
        self.current_audio_analysis = sp.audio_analysis(self.current_track_data["item"]["id"])
        print(time.time() - starttime)
        self.current_audio_features = sp.audio_features(self.current_track_data["item"]["id"])
        print(time.time() - starttime)
        self.correcting = time.time() - starttime
        print(self.correcting)
        for i in self.current_audio_analysis["sections"]:
            print(i["loudness"])

    def time_thread(self):
        """Keep a local estimate of the playback position between API polls.

        Note: despite the name, current_progress_ms is tracked in seconds.
        The fixed 2.5 offset plus the last measured API delay roughly
        compensates for network and playback latency.
        """
        while True:
            if self.is_playing:
                self.current_progress_ms = time.time() - self.track_start_time + 2.5 + self.delaytime
                # print(self.delaytime)
                # print(self.correcting)
                # print(self.current_progress_ms)
                # clear()

    def brightness(self):
        """Map the loudness of the current analysis segment to an LED brightness."""
        while True:
            try:
                for i in self.current_audio_analysis["segments"]:
                    if (self.current_progress_ms > i["start"] + i["duration"]) or (self.current_progress_ms < i["start"]):
                        continue
                    else:
                        # Rescale segment loudness (roughly -60..0 dB) to a 0-255 brightness value.
                        NewValue = (((i["loudness_start"] + 60) ** 2.5 * 255) / 65 ** 2.5)
                        send_data(f'bright:{int(NewValue)}&')
                        # Hold until the segment ends (or playback jumps backwards).
                        while i["start"] + i["duration"] > self.current_progress_ms:
                            # print(self.current_progress_ms)
                            if self.current_progress_ms < i["start"]:
                                break
                            time.sleep(0.01)
                        # print(self.current_progress_ms)
                        # print(i["start"])
                        # clear()
                        # print(int(NewValue))
            except Exception:
                pass

    def find_bar(self, section, tempo):
        """Return the start time of the bar closest to the start of the given section."""
        for n, i in enumerate(self.current_audio_analysis["bars"]):
            if n < len(self.current_audio_analysis["bars"]) - 1:
                if abs(section["start"] - self.current_audio_analysis["bars"][n]["start"]) < abs(section["start"] - self.current_audio_analysis["bars"][n + 1]["start"]):
                    print("OG")
                    print(self.current_audio_analysis["bars"][n]["start"])
                    print(section["start"])
                    return self.current_audio_analysis["bars"][n]["start"]
        # send_data(f'bpm:{int(tempo)}&anim:{random.randint(2,10)}&dir:{random.randint(0,1)}&c1:{random.randint(0,255)}&c1_g:{colors[1]}&c1_b:{colors[2]}&')
        return

    def sections(self):
        """Change animation, colour and BPM whenever a new analysis section starts."""
        prev_end = 0
        while True:
            try:
                for n, i in enumerate(self.current_audio_analysis["sections"]):
                    if (self.current_progress_ms > i["start"] + i["duration"]) or (self.current_progress_ms < i["start"]):
                        continue
                    else:
                        # send_data(f'bpm:{int(i["tempo"])}&anim:{random.randint(2,10)}&dir:0&c1:{random.randint(0,255)}&c1_g:{colors[1]}&c1_b:{colors[2]}&')
                        if n == 0:
                            send_data(f'bpm:{int(i["tempo"])}&anim:{random.randint(2,10)}&dir:{random.randint(0,1)}&c1:{random.randint(0,255)}&c1_g:{colors[1]}&c1_b:{colors[2]}&')
                        if n < len(self.current_audio_analysis["sections"]) - 1:
                            # Snap the section change to the nearest bar so the switch lands on a beat.
                            endtime = self.find_bar(self.current_audio_analysis["sections"][n + 1], i["tempo"])
                            if endtime == prev_end:
                                break
                            send_data(f'bpm:{int(i["tempo"])}&anim:{random.randint(2,10)}&dir:{random.randint(0,1)}&c1:{random.randint(0,255)}&c1_g:{colors[1]}&c1_b:{colors[2]}&')
                            prev_end = endtime
                            while endtime > self.current_progress_ms:
                                if self.current_progress_ms < i["start"]:
                                    break
                                time.sleep(0.01)
                            # print(self.current_progress_ms)
                            # print(i["start"])
                            # clear()
                        # clear()
                        # print(int(NewValue))
            except Exception:
                pass

    def tatums(self):
        """Follow tatum boundaries of the current track (command sending is currently disabled)."""
        while True:
            try:
                for n, i in enumerate(self.current_audio_analysis["tatums"]):
                    if (self.current_progress_ms > i["start"] + i["duration"]) or (self.current_progress_ms < i["start"]):
                        continue
                    else:
                        # print(n)
                        # send_data(f'bpm:{int(i["tempo"])}&anim:{random.randint(2,7)}&dir:0&c1_r:{colors[0]}&c1_g:{colors[1]}&c1_b:{colors[2]}&')
                        while i["start"] + i["duration"] > self.current_progress_ms:
                            if self.current_progress_ms < i["start"]:
                                break
                            time.sleep(0.01)
                        NewValue = (((i["loudness"] + 60) * 255) / 70)  # computed but currently unused
                        # print(self.current_progress_ms)
                        # print(i["start"])
                        # clear()
                        # print(int(NewValue))
            except Exception:
                pass

    def track_data(self):
        """Poll the Spotify API for the current track and keep the playback state in sync."""
        self.current_track_data = sp.current_user_playing_track()
        self.fetch_new_audiodata()
        self.track_start_time = time.time() - self.current_track_data["progress_ms"] / 1000
        self.thread3.start()
        self.thread4.start()
        self.thread5.start()
        # send_data(f'bpm:150&bright:0&anim:0&dir:0&c1_r:0&c1_g:0&c1_b:0&c2_r:0&c2_g:0&c2_b:0&')
        while True:
            try:
                # print("yeet")
                starttime = time.time()
                self.new_current_track_data = sp.current_user_playing_track()
                self.delaytime = time.time() - starttime
                print(self.delaytime)
                self.is_playing = self.current_track_data["is_playing"]
                self.track_start_time = time.time() - self.current_track_data["progress_ms"] / 1000
                if self.current_track_data["item"]["id"] != self.new_current_track_data["item"]["id"]:
                    self.current_track_data = self.new_current_track_data
                    # self.fetch_new_audiodata()
                self.current_track_data = self.new_current_track_data
                # clear()
                # print(current_track_data["progress_ms"])
                # Wait up to three seconds before polling again; refetch the
                # analysis if playback has run past the end of the known track.
                endtime = time.time() + 3
                while time.time() < endtime:
                    if self.current_progress_ms > self.current_audio_analysis["track"]["duration"]:
                        print("bruh")
                        self.fetch_new_audiodata()
                        break
                    else:
                        time.sleep(0.1)
            except Exception:
                # Turn everything off if the API call fails or nothing is playing.
                send_data(f'bpm:150&bright:0&anim:0&dir:0&c1_r:0&c1_g:0&c1_b:0&c2_r:0&c2_g:0&c2_b:0&')


if __name__ == "__main__":
    ledsync = LEDSYNC()