mirror of
https://github.com/morgan9e/UxPlay
synced 2026-04-14 00:04:13 +09:00
UxPlay 1.39 add support for Apple Lossless (ALAC) audio-only AirPlay
This commit is contained in:
13
README.md
13
README.md
@@ -1,4 +1,5 @@
|
||||
# UxPlay 1.38
|
||||
|
||||
# UxPlay 1.39
|
||||
|
||||
This project is a GPLv3 unix AirPlay server which now also works on macOS.
|
||||
Its main use is to act like an AppleTV for screen-mirroring (with audio) of iOS/macOS clients
|
||||
@@ -15,14 +16,18 @@ On Linux and BSD Unix servers, this is usually provided by [Avahi](https://www.a
|
||||
through the avahi-daemon service, and is included in most Linux distributions (this
|
||||
service can also be provided by macOS, iOS or Windows servers).
|
||||
|
||||
_Note: UxPlay currently only works using the AirPlay screen-mirroring protocol (which streams audio in **AAC** format)
|
||||
_New: UxPlay 1.39 now also supports the AirPlay audio-only protocol as well as the AirPlay Mirror protocol, and can play Apple Lossless (ALAC)
|
||||
44100/16/2 audio streamed from Apple Music on the client in 2-channel stereo without video (the accompanying cover art and metadata are received by the server,
|
||||
but not displayed)._
|
||||
|
||||
currently only works using the AirPlay screen-mirroring protocol (which streams audio in **AAC** format)
|
||||
(not the AirPlay audio-only streaming protocol which uses lossless **ALAC** format)
|
||||
but the **uxplay -vs 0** option streams AAC-ELD audio using screen-mirroring without displaying the client's screen.
|
||||
If the client streams audio using AirPlay as opposed to AirPlay screen-mirroring, non-AAC data can be seen to be received and processed by UxPlay, and then
|
||||
input into the GStreamer audio-rendering pipeline, but does not get rendered into audio output. If someone can adapt the GStreamer audio
|
||||
pipeline to also render these AirPlay audio streams, such an enhancement of UxPlay would be welcome as a Pull Request!_
|
||||
|
||||
UxPlay 1.38 is based on https://github.com/FD-/RPiPlay, with GStreamer integration from
|
||||
UxPlay 1.39 is based on https://github.com/FD-/RPiPlay, with GStreamer integration from
|
||||
https://github.com/antimof/UxPlay.
|
||||
(UxPlay only uses GStreamer, and does not contain the alternative Raspberry-Pi-specific
|
||||
audio and video renderers also found in RPiPlay.)
|
||||
@@ -318,6 +323,8 @@ Also: image transforms that had been added to RPiPlay have been ported to UxPlay
|
||||
|
||||
|
||||
# ChangeLog
|
||||
1.39 2021-11-06 Added support for Apple Lossless (ALAC) audio streams.
|
||||
|
||||
1.38 2021-10-08 Add -as _audiosink_ option to allow user to choose the GStreamer audiosink.
|
||||
|
||||
1.37 2021-09-29 Append "@hostname" to AirPlay Server name, where "hostname" is the name of the
|
||||
|
||||
@@ -36,13 +36,13 @@ struct raop_callbacks_s {
|
||||
|
||||
void (*audio_process)(void *cls, raop_ntp_t *ntp, aac_decode_struct *data);
|
||||
void (*video_process)(void *cls, raop_ntp_t *ntp, h264_decode_struct *data);
|
||||
void (*audio_setup)(void *cls, unsigned char *compression_type);
|
||||
|
||||
/* Optional but recommended callback functions */
|
||||
void (*conn_init)(void *cls);
|
||||
void (*conn_destroy)(void *cls);
|
||||
void (*audio_flush)(void *cls);
|
||||
void (*video_flush)(void *cls);
|
||||
void (*audio_compression_type)(void *cls, unsigned char *compression_type);
|
||||
void (*audio_set_volume)(void *cls, float volume);
|
||||
void (*audio_set_metadata)(void *cls, const void *buffer, int buflen);
|
||||
void (*audio_set_coverart)(void *cls, const void *buffer, int buflen);
|
||||
|
||||
@@ -433,12 +433,10 @@ raop_handler_setup(raop_conn_t *conn,
|
||||
// Audio
|
||||
unsigned short cport = conn->raop->control_lport, dport = conn->raop->data_lport;
|
||||
uint64_t ct;
|
||||
if (conn->raop->callbacks.audio_compression_type) {
|
||||
/* get audio compression type */
|
||||
plist_t req_stream_ct_node = plist_dict_get_item(req_stream_node, "ct");
|
||||
plist_get_uint_val(req_stream_ct_node, &ct);
|
||||
conn->raop->callbacks.audio_compression_type(conn->raop->callbacks.cls, (unsigned char*) &ct);
|
||||
}
|
||||
/* get audio compression type */
|
||||
plist_t req_stream_ct_node = plist_dict_get_item(req_stream_node, "ct");
|
||||
plist_get_uint_val(req_stream_ct_node, &ct);
|
||||
conn->raop->callbacks.audio_setup(conn->raop->callbacks.cls, (unsigned char*) &ct);
|
||||
|
||||
if (conn->raop_rtp) {
|
||||
raop_rtp_start_audio(conn->raop_rtp, use_udp, remote_cport, &cport, &dport);
|
||||
@@ -550,14 +548,14 @@ raop_handler_set_parameter(raop_conn_t *conn,
|
||||
}
|
||||
free(datastr);
|
||||
} else if (!strcmp(content_type, "image/jpeg") || !strcmp(content_type, "image/png")) {
|
||||
logger_log(conn->raop->logger, LOGGER_INFO, "Got image data of %d bytes", datalen);
|
||||
logger_log(conn->raop->logger, LOGGER_DEBUG, "Got image data of %d bytes", datalen);
|
||||
if (conn->raop_rtp) {
|
||||
raop_rtp_set_coverart(conn->raop_rtp, data, datalen);
|
||||
} else {
|
||||
logger_log(conn->raop->logger, LOGGER_WARNING, "RAOP not initialized at SET_PARAMETER coverart");
|
||||
}
|
||||
} else if (!strcmp(content_type, "application/x-dmap-tagged")) {
|
||||
logger_log(conn->raop->logger, LOGGER_INFO, "Got metadata of %d bytes", datalen);
|
||||
logger_log(conn->raop->logger, LOGGER_DEBUG, "Got metadata of %d bytes", datalen);
|
||||
if (conn->raop_rtp) {
|
||||
raop_rtp_set_metadata(conn->raop_rtp, data, datalen);
|
||||
} else {
|
||||
|
||||
@@ -34,7 +34,7 @@ static const char alac[] = "audio/x-alac,mpegversion=(int)4,channnels=(int)2,rat
|
||||
/* ct = 4; codec_data from MPEG v4 ISO 14496-3 Section 1.6.2.1: AAC-LC 44100/2 spf = 1024 */
|
||||
static const char aac_lc[] ="audio/mpeg,mpegversion=(int)4,channnels=(int)2,rate=(int)44100,stream-format=raw,codec_data=(buffer)1210";
|
||||
|
||||
/* ct = 8; codec_data from MPEG v4 ISO 14496-3 Section 1.6.2.1: AAC_ELD 44100/2 spf = 460 */
|
||||
/* ct = 8; codec_data from MPEG v4 ISO 14496-3 Section 1.6.2.1: AAC_ELD 44100/2 spf = 480 */
|
||||
static const char aac_eld[] ="audio/mpeg,mpegversion=(int)4,channnels=(int)2,rate=(int)44100,stream-format=raw,codec_data=(buffer)f8e85000";
|
||||
|
||||
struct audio_renderer_s {
|
||||
@@ -71,9 +71,6 @@ audio_renderer_t *audio_renderer_init(logger_t *logger, unsigned char *compressi
|
||||
GError *error = NULL;
|
||||
GstCaps *caps = NULL;
|
||||
|
||||
|
||||
|
||||
|
||||
switch (*compression_type) {
|
||||
case 1: /* uncompressed PCM */
|
||||
case 2: /* Apple lossless ALAC */
|
||||
@@ -127,7 +124,7 @@ audio_renderer_t *audio_renderer_init(logger_t *logger, unsigned char *compressi
|
||||
|
||||
g_object_set(renderer->appsrc, "caps", caps, NULL);
|
||||
gst_caps_unref(caps);
|
||||
|
||||
|
||||
return renderer;
|
||||
}
|
||||
|
||||
@@ -142,6 +139,12 @@ void audio_renderer_render_buffer(audio_renderer_t *renderer, raop_ntp_t *ntp, u
|
||||
|
||||
if (data_len == 0) return;
|
||||
|
||||
/* all audio received seems to be either ct = 8 (AAC_ELD 44100/2 spf 460 ) AirPlay Mirror protocol */
|
||||
/* or ct = 2 (ALAC 44100/16/2 spf 352) AirPlay protocol */
|
||||
/* first byte data[0] of ALAC frame is 0x20, first byte of AAC_ELD is 0x8d or 0x8e, AAC_LC is 0xff (ADTS) */
|
||||
/* GStreamer caps_filter could be used here to switch the appsrc caps between aac_eld and alac */
|
||||
/* depending on the initial byte of the buffer, with a pipeline using decodebin */
|
||||
|
||||
buffer = gst_buffer_new_and_alloc(data_len);
|
||||
assert(buffer != NULL);
|
||||
GST_BUFFER_DTS(buffer) = (GstClockTime)pts;
|
||||
@@ -162,14 +165,18 @@ void audio_renderer_flush(audio_renderer_t *renderer) {
|
||||
}
|
||||
|
||||
void audio_renderer_destroy(audio_renderer_t *renderer) {
|
||||
|
||||
gst_app_src_end_of_stream (GST_APP_SRC(renderer->appsrc));
|
||||
gst_object_unref (renderer->appsrc);
|
||||
gst_element_set_state (renderer->pipeline, GST_STATE_NULL);
|
||||
gst_object_unref (renderer->pipeline);
|
||||
|
||||
gst_object_unref (renderer->volume);
|
||||
if (renderer) {
|
||||
if(renderer) {
|
||||
if(renderer->appsrc) {
|
||||
gst_app_src_end_of_stream (GST_APP_SRC(renderer->appsrc));
|
||||
gst_object_unref (renderer->appsrc);
|
||||
}
|
||||
if (renderer->pipeline) {
|
||||
gst_element_set_state (renderer->pipeline, GST_STATE_NULL);
|
||||
gst_object_unref (renderer->pipeline);
|
||||
}
|
||||
if(renderer->volume) {
|
||||
gst_object_unref (renderer->volume);
|
||||
}
|
||||
free(renderer);
|
||||
}
|
||||
}
|
||||
|
||||
55
uxplay.cpp
55
uxplay.cpp
@@ -44,7 +44,7 @@
|
||||
|
||||
static int start_server (std::vector<char> hw_addr, std::string name, unsigned short display[5],
|
||||
unsigned short tcp[3], unsigned short udp[3], videoflip_t videoflip[2],
|
||||
bool use_audio, bool debug_log, std::string videosink, std::string audiosink);
|
||||
bool debug_log, std::string videosink);
|
||||
|
||||
static int stop_server ();
|
||||
|
||||
@@ -60,7 +60,9 @@ static bool connections_stopped = false;
|
||||
static unsigned int server_timeout = 0;
|
||||
static unsigned int counter;
|
||||
static bool use_video = true;
|
||||
static unsigned char compression_type = 8;
|
||||
static unsigned char compression_type = 0;
|
||||
static std::string audiosink = "autoaudiosink";
|
||||
static bool use_audio = true;
|
||||
|
||||
|
||||
gboolean connection_callback (gpointer loop){
|
||||
@@ -294,13 +296,11 @@ static void append_hostname(std::string &server_name) {
|
||||
int main (int argc, char *argv[]) {
|
||||
std::string server_name = DEFAULT_NAME;
|
||||
std::vector<char> server_hw_addr;
|
||||
bool use_audio = true;
|
||||
bool use_random_hw_addr = false;
|
||||
bool debug_log = DEFAULT_DEBUG_LOG;
|
||||
unsigned short display[5] = {0}, tcp[3] = {0}, udp[3] = {0};
|
||||
videoflip_t videoflip[2] = { NONE , NONE };
|
||||
std::string videosink = "autovideosink";
|
||||
std::string audiosink = "autoaudiosink";
|
||||
|
||||
#ifdef SUPPRESS_AVAHI_COMPAT_WARNING
|
||||
// suppress avahi_compat nag message. avahi emits a "nag" warning (once)
|
||||
@@ -392,6 +392,12 @@ int main (int argc, char *argv[]) {
|
||||
}
|
||||
}
|
||||
|
||||
if(audiosink == "0") {
|
||||
use_audio = false;
|
||||
}
|
||||
if(!use_audio) LOGI("audio_disabled");
|
||||
|
||||
|
||||
if (udp[0]) LOGI("using network ports UDP %d %d %d TCP %d %d %d\n",
|
||||
udp[0],udp[1], udp[2], tcp[0], tcp[1], tcp[2]);
|
||||
|
||||
@@ -408,9 +414,10 @@ int main (int argc, char *argv[]) {
|
||||
append_hostname(server_name);
|
||||
|
||||
relaunch:
|
||||
compression_type = 0;
|
||||
connections_stopped = false;
|
||||
if (start_server(server_hw_addr, server_name, display, tcp, udp,
|
||||
videoflip,use_audio, debug_log, videosink, audiosink)) {
|
||||
videoflip, debug_log, videosink)) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
@@ -466,7 +473,23 @@ extern "C" void audio_set_volume (void *cls, float volume) {
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" void audio_compression_type (void *cls, unsigned char *ct) {
|
||||
extern "C" void audio_setup (void *cls, unsigned char *ct) {
|
||||
if(use_audio) {
|
||||
LOGI("new audio compression type %d (was %d)",*ct, compression_type);
|
||||
if (*ct != compression_type) {
|
||||
if (compression_type && audio_renderer) {
|
||||
audio_renderer_destroy(audio_renderer);
|
||||
LOGD("previous audio_renderer destroyed");
|
||||
}
|
||||
compression_type = *ct;
|
||||
audio_renderer = audio_renderer_init(render_logger, &compression_type, audiosink.c_str());
|
||||
if (audio_renderer) {
|
||||
audio_renderer_start(audio_renderer);
|
||||
} else {
|
||||
LOGW("could not init audio_renderer");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" void log_callback (void *cls, int level, const char *msg) {
|
||||
@@ -494,8 +517,8 @@ extern "C" void log_callback (void *cls, int level, const char *msg) {
|
||||
}
|
||||
|
||||
int start_server (std::vector<char> hw_addr, std::string name, unsigned short display[5],
|
||||
unsigned short tcp[3], unsigned short udp[3], videoflip_t videoflip[2],
|
||||
bool use_audio, bool debug_log, std::string videosink, std::string audiosink) {
|
||||
unsigned short tcp[3], unsigned short udp[3], videoflip_t videoflip[2],
|
||||
bool debug_log, std::string videosink) {
|
||||
raop_callbacks_t raop_cbs;
|
||||
memset(&raop_cbs, 0, sizeof(raop_cbs));
|
||||
raop_cbs.conn_init = conn_init;
|
||||
@@ -505,7 +528,7 @@ int start_server (std::vector<char> hw_addr, std::string name, unsigned short di
|
||||
raop_cbs.audio_flush = audio_flush;
|
||||
raop_cbs.video_flush = video_flush;
|
||||
raop_cbs.audio_set_volume = audio_set_volume;
|
||||
raop_cbs.audio_compression_type = audio_compression_type;
|
||||
raop_cbs.audio_setup = audio_setup;
|
||||
|
||||
raop = raop_init(10, &raop_cbs);
|
||||
if (raop == NULL) {
|
||||
@@ -520,9 +543,6 @@ int start_server (std::vector<char> hw_addr, std::string name, unsigned short di
|
||||
use_video = false;
|
||||
display[3] = 1; /* set fps to 1 frame per sec when no video will be shown */
|
||||
}
|
||||
if(audiosink == "0") {
|
||||
use_audio = false;
|
||||
}
|
||||
|
||||
raop_set_display(raop, display[0], display[1], display[2], display[3], display[4]);
|
||||
|
||||
@@ -548,17 +568,7 @@ int start_server (std::vector<char> hw_addr, std::string name, unsigned short di
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (! use_audio) {
|
||||
LOGI("Audio disabled");
|
||||
} else if ((audio_renderer = audio_renderer_init(render_logger, &compression_type, audiosink.c_str())) ==
|
||||
NULL) {
|
||||
LOGE("Could not init audio renderer");
|
||||
stop_server();
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (use_video && video_renderer) video_renderer_start(video_renderer);
|
||||
if (audio_renderer) audio_renderer_start(audio_renderer);
|
||||
|
||||
unsigned short port = raop_get_port(raop);
|
||||
raop_start(raop, &port);
|
||||
@@ -594,6 +604,7 @@ int stop_server () {
|
||||
}
|
||||
if (audio_renderer) audio_renderer_destroy(audio_renderer);
|
||||
if (video_renderer) video_renderer_destroy(video_renderer);
|
||||
compression_type = 0;
|
||||
if (render_logger) logger_destroy(render_logger);
|
||||
return 0;
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user