Thank you for your donation!


Cloudsmith graciously provides open-source package management and distribution for our project.


Instruction Guide Exporting/Importing Radio Stations
#41
Sorry for the delay, but my old PC gave up on me, and it took me a few days to convince it to work properly again.
I'm attaching the JSON that I would like to import. I tried using the restore function, but I don’t see anything—maybe I made a mistake when creating the backup.
{
    "fields": [
        "id",
        "station",
        "name",
        "type",
        "logo",
        "genre",
        "broadcaster",
        "language",
        "country",
        "region",
        "bitrate",
        "format",
        "geo_fenced",
        "home_page",
        "monitor"
    ],
    "stations": [
        {
            "id": 1,
            "station": "https://icy.unitedradio.it/VirginRockClassics.mp3",
            "name": "Virgin Rock Classics",
            "type": "r",
            "logo": "local",
            "genre": "",
            "broadcaster": "",
            "language": "",
            "country": "",
            "region": "",
            "bitrate": "",
            "format": "",
            "geo_fenced": "No",
            "home_page": "",
            "monitor": "No"
        },
        {
            "id": 3,
            "station": "http://edge.radiomontecarlo.net/RMC.mp3",
            "name": "Radio Montecarlo",
            "type": "r",
            "logo": "local",
            "genre": "",
            "broadcaster": "",
            "language": "",
            "country": "",
            "region": "",
            "bitrate": "",
            "format": "",
            "geo_fenced": "No",
            "home_page": "",
            "monitor": "No"
        },
        {
            "id": 4,
            "station": "https://icy.unitedradio.it/VirginRogerWaters.mp3",
            "name": "Pink Floyd",
            "type": "r",
            "logo": "local",
            "genre": "",
            "broadcaster": "",
            "language": "",
            "country": "",
            "region": "",
            "bitrate": "",
            "format": "",
            "geo_fenced": "No",
            "home_page": "",
            "monitor": "No"
        },
        {
            "id": 5,
            "station": "http://radio.linn.co.uk:8000/autodj",
            "name": "Linn Jazz",
            "type": "r",
            "logo": "local",
            "genre": "Jazz",
            "broadcaster": "Linn",
            "language": "English",
            "country": "United Kingdom",
            "region": "Europe",
            "bitrate": "320",
            "format": "MP3",
            "geo_fenced": "No",
            "home_page": "",
            "monitor": "No"
        },
        {
            "id": 6,
            "station": "http://radio.linn.co.uk:8003/autodj",
            "name": "Linn Radio",
            "type": "r",
            "logo": "local",
            "genre": "Eclectic",
            "broadcaster": "Linn",
            "language": "English",
            "country": "United Kingdom",
            "region": "Europe",
            "bitrate": "320",
            "format": "MP3",
            "geo_fenced": "No",
            "home_page": "",
            "monitor": "No"
        },
        {
            "id": 7,
            "station": "http://radio.linn.co.uk:8004/autodj",
            "name": "Linn Classical",
            "type": "r",
            "logo": "local",
            "genre": "Classical",
            "broadcaster": "Linn",
            "language": "English",
            "country": "United Kingdom",
            "region": "Europe",
            "bitrate": "320",
            "format": "MP3",
            "geo_fenced": "No",
            "home_page": "",
            "monitor": "No"
        },
        {
            "id": 8,
            "station": "https://4c4b867c89244861ac216426883d1ad0.msvdn.net/radiocapital/radiocapital/master_ma.m3u8",
            "name": "Radio Capital",
            "type": "regular",
            "logo": "local",
            "genre": "",
            "broadcaster": "",
            "language": "",
            "country": "",
            "region": "",
            "bitrate": "",
            "format": "",
            "geo_fenced": "",
            "home_page": "",
            "monitor": ""
        },
        {
            "id": 9,
            "station": "https://streamcdnb2-dd782ed59e2a4e86aabf6fc508674b59.msvdn.net/live/S3160845/D6MENOraq6Qy/chunklist_b128000.m3u8",
            "name": "Radio Freccia",
            "type": "regular",
            "logo": "local",
            "genre": "",
            "broadcaster": "",
            "language": "",
            "country": "",
            "region": "",
            "bitrate": "",
            "format": "",
            "geo_fenced": "",
            "home_page": "",
            "monitor": ""
        },
        {
            "id": 10,
            "station": "http://str01.fluidstream.net:7020/;stream.mp3",
            "name": "Radio 80",
            "type": "regular",
            "logo": "local",
            "genre": "",
            "broadcaster": "",
            "language": "",
            "country": "",
            "region": "",
            "bitrate": "",
            "format": "",
            "geo_fenced": "",
            "home_page": "",
            "monitor": ""
        }
    ]
}
In fact, when I try to restore, it only activates the playlist cursor, but not radio moode or personal radio.

I would also attach the ZIP file, but I don't know if it's possible.
Thanks for your attention.
Antonello
Reply
#42
@antoaudio

Yes, you can attach your ZIP file to a post in this forum, but it must be smaller than 2048 KB. If larger, you could post it to DropBox or some other filesharing site and post a link to it in your forum post (or in a private message to @Tim Curtis, me, possibly @bitlab, and @Nutul if he wants to see it).

Regards,
Kent
Reply
#43
Hello everyone,
if you allow me, I would like to give my personal and humble cent: Antonello, could you try with the attached .zip file?
Thanks, sorry for the intrusion and best regards,
Francesco

P.S. Ciao


.zip   moOde_9.2.0_Antonello.zip (Size: 558.3 KB / Downloads: 2)
Reply
#44
(03-01-2025, 11:25 AM)fdealexa Wrote: Hello everyone,
if you allow me, I would like to give my personal and humble cent: Antonello, could you try with the attached .zip file?
Thanks, sorry for the intrusion and best regards,
Francesco

P.S. Ciao

Thanks for stepping in, Francesco. I wasn’t trying to make this a private club — just naming a few folks off the top of my head.

Ciao.

-Kent
Reply
#45
Thank you, Kent, for your kindness; let us wait for Antonello's response.
Best regards and thanks again,
Francesco
Reply
#46
(03-01-2025, 11:25 AM)fdealexa Wrote: Hello everyone,
if you allow me, I would like to give my personal and humble cent: Antonello, could you try with the attached .zip file?
Thanks, sorry for the intrusion and best regards,
Francesco

P.S. Ciao

Bingo!!!!!!!!!!!! 
Thanks a lot!! What was the issue with the ZIP? I'm going to compare it with the one I generated. Thanks for everyone's support e grazie Francesco!!
Reply
#47
Hi Antonello,
I'm glad you're happy to have solved it.
I found that the RADIOFRECCIA stream address was not correct: but this cannot cause the malfunction that you had reported.
The identification number, being relative to additional stations, seems correct to me to start from 500 and, furthermore, I noticed that there was no continuity (the id. 2 was missing).
Last note: in some cases you used "no" for some fields.
Frankly, I have not, intentionally, tried to replicate the problem but I preferred to create a file starting exclusively from "station" and "name".
Kind regards.
Ciao,
Francesco
Reply
#48
(03-01-2025, 05:48 PM)fdealexa Wrote: Hi Antonello,
I'm glad you're happy to have solved it.
I found that the RADIOFRECCIA stream address was not correct: but this cannot cause the malfunction that you had reported.
The identification number, being relative to additional stations, seems correct to me to start from 500 and, furthermore, I noticed that there was no continuity (the id. 2 was missing).
Last note: in some cases you used "no" for some fields.
Frankly, I have not, intentionally, tried to replicate the problem but I preferred to create a file starting exclusively from "station" and "name".
Kind regards.
Ciao,
Francesco

Hi, I don't know if I've "reinvented the wheel," but based on the experiences mentioned above and needing to reload some Italian stations, I wrote a Python script that automates a few tasks:
  1. It downloads the configuration using the Station Manager utility.
  2. It retrieves a list of radio stations via the Radio-Browser API (I filtered n Italian radios and m Tuscan radios within the script).
  3. It attempts to download the station logo if available, converting it from PNG to three JPG formats; if no logo is provided, it assigns a default.jpg (which I took from Moode).
  4. It merges the data with the Moode backup, avoiding duplicates, and generates both a JSON file and an icon folder.
  5. It creates a ZIP file and reuploads it using the radio_station utility.
I'd say it works fine, apart from the hardcoded filters, which aren't pretty but get the job done.

Just to give my small contribution, I'm pasting the script
Code:
import json
import os
import re
import shutil
import zipfile
from datetime import datetime

import requests
from PIL import Image

# Directory to save images
# ("radio-logos" matches the folder name used inside the moOde import ZIP below)
image_dir = "radio-logos"
thumbnails_dir = os.path.join(image_dir, "thumbs")
os.makedirs(image_dir, exist_ok=True)
os.makedirs(thumbnails_dir, exist_ok=True)

# Base URL for radio-browser API (de1 is one of the public API mirrors)
base_url = "https://de1.api.radio-browser.info/json/stations/search"

# Get current timestamp, used to build unique export/import archive names
now = datetime.now().strftime("%Y%m%d_%H%M%S")
export_filename = f"moode_export_{now}.zip"
import_filename = f"moode_import_{now}.zip"

# Export current Moode radio stations using moOde's station_manager utility
os.system(f"sudo python3 /var/www/util/station_manager.py --export {export_filename}")

# Load exported station data (the archive contains station_data.json)
with zipfile.ZipFile(export_filename, 'r') as zip_ref:
   zip_ref.extractall("moode_export")
exported_json_path = os.path.join("moode_export", "station_data.json")

with open(exported_json_path, "r") as f:
    stations_data = json.load(f)

# Find the highest current ID
# NOTE(review): default=550 only matters when the export contains no stations;
# presumably user-added moOde stations are numbered above 500 — confirm.
existing_ids = [s["id"] for s in stations_data["stations"]]
next_station_id = max(existing_ids, default=550) + 1

# Collect existing station URLs to prevent duplicates when merging
existing_urls = {s["station"] for s in stations_data["stations"]}

# Default image path (fallback logo copied for stations without a usable favicon)
default_image_path = "default.jpg"

def sanitize_name(name):
    """Return *name* with every character that is not a letter, digit, or
    whitespace removed, then trimmed of surrounding whitespace."""
    cleaned = re.sub(r'[^a-zA-Z0-9\s]', '', name)
    return cleaned.strip()

def download_and_convert_favicon(favicon_url, station_name):
    """Fetch a station favicon, re-encode it as JPEG, and build thumbnails.

    Writes three files: the full-size JPEG in ``image_dir``, plus a 200x200
    and an 80x80 ("_sm") thumbnail in ``thumbnails_dir``. When the station
    has no favicon URL, or the download/conversion fails, the fallback
    artwork at ``default_image_path`` is copied instead.

    Args:
        favicon_url: URL of the station logo, or "" when none is known.
        station_name: Display name; sanitized to derive the file names.

    Returns:
        The JPEG file name (e.g. "Radio Capital.jpg"), or "default.jpg"
        when the default image itself is missing.
    """
    safe_name = sanitize_name(station_name)  # name without special characters
    image_name = f"{safe_name}.jpg"
    image_path = os.path.join(image_dir, image_name)
    thumb_200_path = os.path.join(thumbnails_dir, f"{safe_name}.jpg")
    thumb_80_path = os.path.join(thumbnails_dir, f"{safe_name}_sm.jpg")

    # Without the default image there is no safe fallback, so bail out early.
    if not os.path.exists(default_image_path):
        print(f"Error: Default image '{default_image_path}' not found.")
        return "default.jpg"

    if not favicon_url:
        # No favicon advertised: use the default artwork. shutil.copyfile is
        # portable and avoids spawning a shell (the original shelled out to cp).
        shutil.copyfile(default_image_path, image_path)
    else:
        try:
            response = requests.get(favicon_url, stream=True, timeout=5)
            if response.status_code == 200:
                # Save the raw download first, then re-encode it as JPEG.
                temp_path = os.path.join(image_dir, f"{safe_name}.png")
                with open(temp_path, 'wb') as f:
                    for chunk in response.iter_content(1024):
                        f.write(chunk)
                # convert("RGB") flattens palette/RGBA images so they can be
                # stored as JPEG; `with` closes the file handle promptly.
                with Image.open(temp_path) as img:
                    img.convert("RGB").save(image_path, "JPEG")
                os.remove(temp_path)  # Remove original downloaded file
            else:
                shutil.copyfile(default_image_path, image_path)
        except Exception as e:
            # Timeouts, network errors, and undecodable images all land here;
            # fall back to the default artwork and keep going.
            print(f"Error downloading or converting favicon for {station_name}: {e}")
            shutil.copyfile(default_image_path, image_path)

    # Image.thumbnail only ever shrinks in place, so deriving the 80x80
    # version from the already-shrunk 200x200 image is safe.
    with Image.open(image_path) as img:
        img.thumbnail((200, 200))
        img.save(thumb_200_path, "JPEG")
        img.thumbnail((80, 80))
        img.save(thumb_80_path, "JPEG")

    return image_name

def fetch_stations(params, limit):
    """Query the Radio-Browser search endpoint and merge new stations.

    Args:
        params: Query parameters for the /json/stations/search endpoint
            (country, state, ordering, ...).
        limit: Maximum number of stations from the response to process.

    Side effects:
        Appends entries to ``stations_data["stations"]``, records their URLs
        in ``existing_urls``, writes their logo JPEG + thumbnails to disk,
        and advances the module-level ``next_station_id`` counter.
    """
    global next_station_id
    response = requests.get(base_url, params=params)
    # NOTE(review): a non-200 response is silently ignored and the script
    # simply keeps whatever stations were already collected.
    if response.status_code == 200:
        stations = response.json()
        for station in stations[:limit]:
            station_url = station.get("url_resolved", "")
            if station_url in existing_urls:
                continue  # Skip duplicate stations

            station_name = sanitize_name(station.get("name", "Unknown"))
            favicon_url = station.get("favicon", "")
            # Called for its side effect: it writes the logo JPEG and its
            # thumbnails to disk. (The original bound the return value to an
            # unused `logo` local; the record below always uses "local".)
            download_and_convert_favicon(favicon_url, station_name)

            stations_data["stations"].append({
                "id": next_station_id,
                "station": station_url,
                "name": station_name,  # name without special characters
                "type": "r",
                "logo": "local",
                "genre": station.get("tags", ""),
                "broadcaster": "XX. Antonello",
                "language": station.get("language", ""),
                "country": station.get("country", ""),
                "region": station.get("state", ""),
                "bitrate": "",  # Exclude bitrate
                "format": station.get("codec", ""),
                "geo_fenced": "",
                "home_page": station.get("homepage", ""),
                "monitor": ""
            })
            existing_urls.add(station_url)
            next_station_id += 1

# Fetch stations from Tuscany
# NOTE(review): the original comment said "n (=15)" but the two calls below
# use limit=10 each; also, Radio-Browser's search endpoint documents a
# "state" parameter, not "region" — verify whether the second call's filter
# is actually honored.
fetch_stations({"country": "Italy", "state": "Tuscany", "order": "votes","reverse":"true","hidebroken":"true"}, limit=10)
fetch_stations({"country": "Italy", "region": "Toscana", "order": "votes","reverse":"true","hidebroken":"true"}, limit=10)
# Fetch 50 national stations
fetch_stations({"country": "Italy", "order": "votes","reverse":"true","hidebroken":"true"}, limit=50)

# Sort stations by ID so the merged file keeps a stable, readable order
stations_data["stations"].sort(key=lambda x: x["id"])

# Save updated stations data
updated_json_path = "station_data.json"
with open(updated_json_path, "w") as f:
   json.dump(stations_data, f, indent=4)

# Create import ZIP: station_data.json at the archive root plus the whole
# radio-logos/ tree (thumbnails included), mirroring the export layout
with zipfile.ZipFile(import_filename, 'w', zipfile.ZIP_DEFLATED) as zipf:
   zipf.write(updated_json_path, "station_data.json")
   for root, _, files in os.walk(image_dir):
       for file in files:
           file_path = os.path.join(root, file)
           arcname = os.path.relpath(file_path, start=image_dir)
           zipf.write(file_path, os.path.join("radio-logos", arcname))

# Import updated stations into Moode Audio
# ("--scope other" presumably restricts the import to user-added stations —
# confirm against /var/www/util/station_manager.py)
os.system(f"sudo python3 /var/www/util/station_manager.py --scope other --import {import_filename}")

print(f"Data has been successfully imported from {import_filename}")
Ciao

Antonello
Reply
#49
(03-04-2025, 09:50 PM)antoaudio Wrote:
(03-01-2025, 05:48 PM)fdealexa Wrote: Hi Antonello,
I'm glad you're happy to have solved it.
I found that the RADIOFRECCIA stream address was not correct: but this cannot cause the malfunction that you had reported.
The identification number, being relative to additional stations, seems correct to me to start from 500 and, furthermore, I noticed that there was no continuity (the id. 2 was missing).
Last note: in some cases you used "no" for some fields.
Frankly, I have not, intentionally, tried to replicate the problem but I preferred to create a file starting exclusively from "station" and "name".
Kind regards.
Ciao,
Francesco

Hi, I don't know if I've "reinvented the wheel," but based on the experiences mentioned above and needing to reload some Italian stations, I wrote a Python script that automates a few tasks:
  1. It downloads the configuration using the Station Manager utility.
  2. It retrieves a list of radio stations via the Radio-Browser API (I filtered n Italian radios and m Tuscan radios within the script).
  3. It attempts to download the station logo if available, converting it from PNG to three JPG formats; if no logo is provided, it assigns a default.jpg (which I took from Moode).
  4. It merges the data with the Moode backup, avoiding duplicates, and generates both a JSON file and an icon folder.
  5. It creates a ZIP file and reuploads it using the radio_station utility.
I'd say it works fine, apart from the hardcoded filters, which aren't pretty but get the job done.

Just to give my small contribution, I'm pasting the script
Code:
import requests
import json
import os
import re
import zipfile
from datetime import datetime
from PIL import Image

# Directory to save images
image_dir = "radio-logos"
thumbnails_dir = os.path.join(image_dir, "thumbs")
os.makedirs(image_dir, exist_ok=True)
os.makedirs(thumbnails_dir, exist_ok=True)

# Base URL for radio-browser API
base_url = "https://de1.api.radio-browser.info/json/stations/search"

# Get current timestamp
now = datetime.now().strftime("%Y%m%d_%H%M%S")
export_filename = f"moode_export_{now}.zip"
import_filename = f"moode_import_{now}.zip"

# Export current Moode radio stations
os.system(f"sudo python3 /var/www/util/station_manager.py --export {export_filename}")

# Load exported station data
with zipfile.ZipFile(export_filename, 'r') as zip_ref:
   zip_ref.extractall("moode_export")
exported_json_path = os.path.join("moode_export", "station_data.json")

with open(exported_json_path, "r") as f:
   stations_data = json.load(f)

# Find the highest current ID
existing_ids = [s["id"] for s in stations_data["stations"]]
next_station_id = max(existing_ids, default=550) + 1

# Collect existing station URLs to prevent duplicates
existing_urls = {s["station"] for s in stations_data["stations"]}

# Default image path
default_image_path = "default.jpg"

def sanitize_name(name):
   return re.sub(r'[^a-zA-Z0-9\s]', '', name).strip()

def download_and_convert_favicon(favicon_url, station_name):
   safe_name = sanitize_name(station_name)  # Nome senza caratteri speciali
   image_name = f"{safe_name}.jpg"
   image_path = os.path.join(image_dir, image_name)
   thumb_200_path = os.path.join(thumbnails_dir, f"{safe_name}.jpg")
   thumb_80_path = os.path.join(thumbnails_dir, f"{safe_name}_sm.jpg")
   
   if not os.path.exists(default_image_path):
       print(f"Error: Default image '{default_image_path}' not found.")
       return "default.jpg"
   
   if not favicon_url:
       os.system(f"cp '{default_image_path}' '{image_path}'")  # Copy default image
   else:
       try:
           response = requests.get(favicon_url, stream=True, timeout=5)
           if response.status_code == 200:
               temp_path = os.path.join(image_dir, f"{safe_name}.png")
               with open(temp_path, 'wb') as f:
                   for chunk in response.iter_content(1024):
                       f.write(chunk)
               # Convert to JPEG
               img = Image.open(temp_path)
               img = img.convert("RGB")
               img.save(image_path, "JPEG")
               os.remove(temp_path)  # Remove original PNG file
           else:
               os.system(f"cp '{default_image_path}' '{image_path}'")
       except Exception as e:
           print(f"Error downloading or converting favicon for {station_name}: {e}")
           os.system(f"cp '{default_image_path}' '{image_path}'")
   
   # Create 200x200 thumbnail
   img = Image.open(image_path)
   img.thumbnail((200, 200))
   img.save(thumb_200_path, "JPEG")
   
   # Create 80x80 thumbnail
   img.thumbnail((80, 80))
   img.save(thumb_80_path, "JPEG")
   
   return image_name

def fetch_stations(params, limit):
   global next_station_id
   response = requests.get(base_url, params=params)
   if response.status_code == 200:
       stations = response.json()
       for station in stations[:limit]:
           station_url = station.get("url_resolved", "")
           if station_url in existing_urls:
               continue  # Skip duplicate stations
           
           station_name = sanitize_name(station.get("name", "Unknown"))
           favicon_url = station.get("favicon", "")
           logo = download_and_convert_favicon(favicon_url, station_name)
           
           stations_data["stations"].append({
               "id": next_station_id,
               "station": station_url,
               "name": station_name,  # Nome senza _
               "type": "r",
               "logo": "local",
               "genre": station.get("tags", ""),
               "broadcaster": "XX. Antonello",
               "language": station.get("language", ""),
               "country": station.get("country", ""),
               "region": station.get("state", ""),
               "bitrate": "",  # Exclude bitrate
               "format": station.get("codec", ""),
               "geo_fenced": "",
               "home_page": station.get("homepage", ""),
               "monitor": ""
           })
           existing_urls.add(station_url)
           next_station_id += 1

# Fetch n (=15) stations from Tuscany
fetch_stations({"country": "Italy", "state": "Tuscany", "order": "votes","reverse":"true","hidebroken":"true"}, limit=10)
fetch_stations({"country": "Italy", "region": "Toscana", "order": "votes","reverse":"true","hidebroken":"true"}, limit=10)
# Fetch 50 national stations
fetch_stations({"country": "Italy", "order": "votes","reverse":"true","hidebroken":"true"}, limit=50)

# Sort stations by ID
stations_data["stations"].sort(key=lambda x: x["id"])

# Save updated stations data
updated_json_path = "station_data.json"
with open(updated_json_path, "w") as f:
   json.dump(stations_data, f, indent=4)

# Create import ZIP
with zipfile.ZipFile(import_filename, 'w', zipfile.ZIP_DEFLATED) as zipf:
   zipf.write(updated_json_path, "station_data.json")
   for root, _, files in os.walk(image_dir):
       for file in files:
           file_path = os.path.join(root, file)
           arcname = os.path.relpath(file_path, start=image_dir)
           zipf.write(file_path, os.path.join("radio-logos", arcname))

# Import updated stations into Moode Audio
os.system(f"sudo python3 /var/www/util/station_manager.py --scope other --import {import_filename}")

print(f"Data has been successfully imported from {import_filename}")
Ciao

Antonello

That's pretty cool.  I think it deserves its own thread with an appropriate title so others can find it easily.
----------------
Robert
Reply


Forum Jump: