From 2ecf00d7733c473358b652c50b83cb95f65174ff Mon Sep 17 00:00:00 2001
From: Sven Gothel <sgothel@jausoft.com>
Date: Thu, 23 Feb 2023 22:18:39 +0100
Subject: FFMPEGMediaPlayer: Add implementation update for FFmpeg version 4.*
 (Debian 11), 5.* (Debian 12) and 6.* (Current Development trunk)

From here on, libav support has been dropped.

The following FFmpeg libraries are required, and their major compile-time and runtime versions must match exactly:
- avcodec
- avformat
- avutil
- swresample

Library avdevice is optional and only used for video input devices (camera).

Support for library avresample has been removed, since FFmpeg itself dropped it in version 6.*;
swresample is preferred for earlier FFmpeg versions as well.

The major versions of each library matching each FFmpeg release
are documented within the FFMPEGMediaPlayer class API-doc.

Each implementation version uses the non-deprecated FFmpeg code-path
and compilation using matching header files is warning-free.
---
 src/jogl/native/libav/ffmpeg_impl_template.c | 808 ++++++++++++++-------------
 1 file changed, 412 insertions(+), 396 deletions(-)

(limited to 'src/jogl/native/libav/ffmpeg_impl_template.c')

diff --git a/src/jogl/native/libav/ffmpeg_impl_template.c b/src/jogl/native/libav/ffmpeg_impl_template.c
index d02770eb7..3e6b29d05 100644
--- a/src/jogl/native/libav/ffmpeg_impl_template.c
+++ b/src/jogl/native/libav/ffmpeg_impl_template.c
@@ -1,5 +1,5 @@
 /**
- * Copyright 2012 JogAmp Community. All rights reserved.
+ * Copyright 2012-2023 JogAmp Community. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without modification, are
  * permitted provided that the following conditions are met:
@@ -33,13 +33,6 @@
 #include "ffmpeg_static.h"
 #include "ffmpeg_dshow.h"
 
-#include "libavutil/pixdesc.h"
-#include "libavutil/samplefmt.h"
-#if LIBAVUTIL_VERSION_MAJOR < 53
-    #include "libavutil/audioconvert.h"
-    // 52: #include "libavutil/channel_layout.h"
-#endif
-
 #include <GL/gl.h>
 
 #define HAS_FUNC(f) (NULL!=(f))
@@ -47,65 +40,59 @@
 typedef unsigned (APIENTRYP AVUTIL_VERSION)(void);
 typedef unsigned (APIENTRYP AVFORMAT_VERSION)(void);
 typedef unsigned (APIENTRYP AVCODEC_VERSION)(void);
-typedef unsigned (APIENTRYP AVRESAMPLE_VERSION)(void);
+typedef unsigned (APIENTRYP AVDEVICE_VERSION)(void);
 typedef unsigned (APIENTRYP SWRESAMPLE_VERSION)(void);
 
 static AVUTIL_VERSION sp_avutil_version;
 static AVFORMAT_VERSION sp_avformat_version; 
 static AVCODEC_VERSION sp_avcodec_version;
-static AVRESAMPLE_VERSION sp_avresample_version;
+static AVDEVICE_VERSION sp_avdevice_version;
 static SWRESAMPLE_VERSION sp_swresample_version;
 // count: 5
 
 // libavcodec
-typedef int (APIENTRYP AVCODEC_REGISTER_ALL)(void);
 typedef int (APIENTRYP AVCODEC_CLOSE)(AVCodecContext *avctx);
 typedef void (APIENTRYP AVCODEC_STRING)(char *buf, int buf_size, AVCodecContext *enc, int encode);
-typedef AVCodec *(APIENTRYP AVCODEC_FIND_DECODER)(int avCodecID); // lavc 53: 'enum CodecID id', lavc 54: 'enum AVCodecID id'
+typedef AVCodec *(APIENTRYP AVCODEC_FIND_DECODER)(enum AVCodecID avCodecID); // lavc 53: 'enum CodecID id', lavc 54: 'enum AVCodecID id'
+typedef AVCodecContext* (APIENTRYP AVCODEC_ALLOC_CONTEXT3)(const AVCodec* codec);
+typedef void (APIENTRYP AVCODEC_FREE_CONTEXT)(AVCodecContext** avctx);
+typedef int (APIENTRYP AVCODEC_PARAMTERS_TO_CONTEXT)(AVCodecContext *codec, const AVCodecParameters *par);
 typedef int (APIENTRYP AVCODEC_OPEN2)(AVCodecContext *avctx, AVCodec *codec, AVDictionary **options);                          // 53.6.0
-typedef AVFrame *(APIENTRYP AVCODEC_ALLOC_FRAME)(void);
-typedef void (APIENTRYP AVCODEC_GET_FRAME_DEFAULTS)(AVFrame *frame);
-typedef void (APIENTRYP AVCODEC_FREE_FRAME)(AVFrame **frame);
-typedef int (APIENTRYP AVCODEC_DEFAULT_GET_BUFFER)(AVCodecContext *s, AVFrame *pic); // <= 54 (opt), else AVCODEC_DEFAULT_GET_BUFFER2
-typedef void (APIENTRYP AVCODEC_DEFAULT_RELEASE_BUFFER)(AVCodecContext *s, AVFrame *pic); // <= 54 (opt), else AV_FRAME_UNREF
-typedef int (APIENTRYP AVCODEC_DEFAULT_GET_BUFFER2)(AVCodecContext *s, AVFrame *frame, int flags); // 55. (opt)
-typedef int (APIENTRYP AVCODEC_GET_EDGE_WIDTH)();
-typedef int (APIENTRYP AV_IMAGE_FILL_LINESIZES)(int linesizes[4], int pix_fmt, int width); // lavu 51: 'enum PixelFormat pix_fmt', lavu 53: 'enum AVPixelFormat pix_fmt'
-typedef void (APIENTRYP AVCODEC_ALIGN_DIMENSIONS)(AVCodecContext *s, int *width, int *height);
-typedef void (APIENTRYP AVCODEC_ALIGN_DIMENSIONS2)(AVCodecContext *s, int *width, int *height, int linesize_align[AV_NUM_DATA_POINTERS]);
+typedef AVFrame *(APIENTRYP AV_FRAME_ALLOC)(void); // 55.28.1
+typedef void (APIENTRYP AV_FREE_FRAME)(AVFrame **frame); // 55.28.1
+typedef int (APIENTRYP AVCODEC_DEFAULT_GET_BUFFER2)(AVCodecContext *s, AVFrame *frame, int flags); // 55.
+typedef int (APIENTRYP AV_IMAGE_FILL_LINESIZES)(int linesizes[4], enum AVPixelFormat pix_fmt, int width); // lavu 51: 'enum PixelFormat pix_fmt', lavu 53: 'enum AVPixelFormat pix_fmt'
 typedef void (APIENTRYP AVCODEC_FLUSH_BUFFERS)(AVCodecContext *avctx);
-typedef void (APIENTRYP AV_INIT_PACKET)(AVPacket *pkt);
+typedef AVPacket* (APIENTRYP AV_PACKET_ALLOC)(void);
+typedef void (APIENTRYP AV_PACKET_FREE)(AVPacket **pkt);
 typedef int (APIENTRYP AV_NEW_PACKET)(AVPacket *pkt, int size);
-typedef void (APIENTRYP AV_DESTRUCT_PACKET)(AVPacket *pkt);
-typedef void (APIENTRYP AV_FREE_PACKET)(AVPacket *pkt);
-typedef int (APIENTRYP AVCODEC_DECODE_AUDIO4)(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt);     // 53.25.0
-typedef int (APIENTRYP AVCODEC_DECODE_VIDEO2)(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, AVPacket *avpkt); // 52.23.0
+typedef void (APIENTRYP AV_PACKET_UNREF)(AVPacket *pkt);
+typedef int (APIENTRYP AVCODEC_SEND_PACKET)(AVCodecContext *avctx, AVPacket *avpkt); // 57
+typedef int (APIENTRYP AVCODEC_RECEIVE_FRAME)(AVCodecContext *avctx, AVFrame *picture); // 57
 
-static AVCODEC_REGISTER_ALL sp_avcodec_register_all;
 static AVCODEC_CLOSE sp_avcodec_close;
 static AVCODEC_STRING sp_avcodec_string;
 static AVCODEC_FIND_DECODER sp_avcodec_find_decoder;
+static AVCODEC_ALLOC_CONTEXT3 sp_avcodec_alloc_context3;
+static AVCODEC_FREE_CONTEXT sp_avcodec_free_context;
+static AVCODEC_PARAMTERS_TO_CONTEXT sp_avcodec_parameters_to_context;
 static AVCODEC_OPEN2 sp_avcodec_open2;                    // 53.6.0
-static AVCODEC_ALLOC_FRAME sp_avcodec_alloc_frame;
-static AVCODEC_GET_FRAME_DEFAULTS sp_avcodec_get_frame_defaults;
-static AVCODEC_FREE_FRAME sp_avcodec_free_frame;
-static AVCODEC_DEFAULT_GET_BUFFER sp_avcodec_default_get_buffer; // <= 54 (opt), else sp_avcodec_default_get_buffer2
-static AVCODEC_DEFAULT_RELEASE_BUFFER sp_avcodec_default_release_buffer; // <= 54 (opt), else sp_av_frame_unref
-static AVCODEC_DEFAULT_GET_BUFFER2 sp_avcodec_default_get_buffer2; // 55. (opt)
-static AVCODEC_GET_EDGE_WIDTH sp_avcodec_get_edge_width;
+static AV_FRAME_ALLOC sp_av_frame_alloc;             // 55.28.1
+static AV_FREE_FRAME sp_av_free_frame;                    // 55.28.1
+static AVCODEC_DEFAULT_GET_BUFFER2 sp_avcodec_default_get_buffer2; // 55.
 static AV_IMAGE_FILL_LINESIZES sp_av_image_fill_linesizes;
-static AVCODEC_ALIGN_DIMENSIONS sp_avcodec_align_dimensions;
-static AVCODEC_ALIGN_DIMENSIONS2 sp_avcodec_align_dimensions2;
 static AVCODEC_FLUSH_BUFFERS sp_avcodec_flush_buffers;
-static AV_INIT_PACKET sp_av_init_packet;
+static AV_PACKET_ALLOC sp_av_packet_alloc; // sp_av_init_packet
+static AV_PACKET_FREE sp_av_packet_free;
 static AV_NEW_PACKET sp_av_new_packet;
-static AV_DESTRUCT_PACKET sp_av_destruct_packet;
-static AV_FREE_PACKET sp_av_free_packet;
-static AVCODEC_DECODE_AUDIO4 sp_avcodec_decode_audio4;    // 53.25.0
-static AVCODEC_DECODE_VIDEO2 sp_avcodec_decode_video2;    // 52.23.0
-// count: 27
+static AV_PACKET_UNREF sp_av_packet_unref;
+
+static AVCODEC_SEND_PACKET sp_avcodec_send_packet;    // 57
+static AVCODEC_RECEIVE_FRAME sp_avcodec_receive_frame;    // 57
+// count: +18 = 23
 
 // libavutil
+typedef AVPixFmtDescriptor* (APIENTRYP AV_PIX_FMT_DESC_GET)(enum AVPixelFormat pix_fmt); // lavu >= 51.45;  lavu 51: 'enum PixelFormat pix_fmt', lavu 53: 'enum AVPixelFormat pix_fmt'
 typedef void (APIENTRYP AV_FRAME_UNREF)(AVFrame *frame);
 typedef void* (APIENTRYP AV_REALLOC)(void *ptr, size_t size);
 typedef void (APIENTRYP AV_FREE)(void *ptr);
@@ -117,8 +104,12 @@ typedef AVDictionaryEntry* (APIENTRYP AV_DICT_GET)(AVDictionary *m, const char *
 typedef int (APIENTRYP AV_DICT_COUNT)(AVDictionary *m);
 typedef int (APIENTRYP AV_DICT_SET)(AVDictionary **pm, const char *key, const char *value, int flags);
 typedef void (APIENTRYP AV_DICT_FREE)(AVDictionary **m);
+typedef void (APIENTRYP AV_CHANNEL_LAYOUT_DEFAULT)(AVChannelLayoutPtr ch_layout, int nb_channels);
+typedef void (APIENTRYP AV_CHANNEL_LAYOUT_UNINIT)(AVChannelLayoutPtr ch_layout);
+typedef int (APIENTRYP AV_CHANNEL_LAYOUT_DESCRIBE)(AVChannelLayoutPtr ch_layout, char* buf, size_t buf_size);
+typedef int (APIENTRYP AV_OPT_SET_CHLAYOUT)(void *obj, const char *name, const AVChannelLayoutPtr val, int search_flags);
 
-static const AVPixFmtDescriptor* sp_av_pix_fmt_descriptors;
+static AV_PIX_FMT_DESC_GET sp_av_pix_fmt_desc_get;
 static AV_FRAME_UNREF sp_av_frame_unref;
 static AV_REALLOC sp_av_realloc;
 static AV_FREE sp_av_free;
@@ -130,13 +121,16 @@ static AV_DICT_GET sp_av_dict_get;
 static AV_DICT_COUNT sp_av_dict_count;
 static AV_DICT_SET sp_av_dict_set;
 static AV_DICT_FREE sp_av_dict_free;
-// count: 39
+static AV_CHANNEL_LAYOUT_DEFAULT sp_av_channel_layout_default; // >= 59
+static AV_CHANNEL_LAYOUT_UNINIT sp_av_channel_layout_uninit; // >= 59
+static AV_CHANNEL_LAYOUT_DESCRIBE sp_av_channel_layout_describe; // >= 59
+static AV_OPT_SET_CHLAYOUT sp_av_opt_set_chlayout; // >= 59
+// count: +16 = 39
 
 // libavformat
 typedef AVFormatContext *(APIENTRYP AVFORMAT_ALLOC_CONTEXT)(void);
 typedef void (APIENTRYP AVFORMAT_FREE_CONTEXT)(AVFormatContext *s);  // 52.96.0
 typedef void (APIENTRYP AVFORMAT_CLOSE_INPUT)(AVFormatContext **s);  // 53.17.0
-typedef void (APIENTRYP AV_REGISTER_ALL)(void);
 typedef AVInputFormat *(APIENTRYP AV_FIND_INPUT_FORMAT)(const char *short_name);
 typedef int (APIENTRYP AVFORMAT_OPEN_INPUT)(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options);
 typedef void (APIENTRYP AV_DUMP_FORMAT)(AVFormatContext *ic, int index, const char *url, int is_output);
@@ -152,7 +146,6 @@ typedef int (APIENTRYP AVFORMAT_FIND_STREAM_INFO)(AVFormatContext *ic, AVDiction
 static AVFORMAT_ALLOC_CONTEXT sp_avformat_alloc_context;
 static AVFORMAT_FREE_CONTEXT sp_avformat_free_context;            // 52.96.0 (not used, only for outfile cts)
 static AVFORMAT_CLOSE_INPUT sp_avformat_close_input;              // 53.17.0
-static AV_REGISTER_ALL sp_av_register_all;
 static AV_FIND_INPUT_FORMAT sp_av_find_input_format;
 static AVFORMAT_OPEN_INPUT sp_avformat_open_input;
 static AV_DUMP_FORMAT sp_av_dump_format;
@@ -164,27 +157,12 @@ static AV_READ_PAUSE sp_av_read_pause;
 static AVFORMAT_NETWORK_INIT sp_avformat_network_init;            // 53.13.0
 static AVFORMAT_NETWORK_DEINIT sp_avformat_network_deinit;        // 53.13.0
 static AVFORMAT_FIND_STREAM_INFO sp_avformat_find_stream_info;    // 53.3.0
-// count: 54
+// count: +14 = 53
 
 // libavdevice [53.0.0]
 typedef int (APIENTRYP AVDEVICE_REGISTER_ALL)(void);
 static AVDEVICE_REGISTER_ALL sp_avdevice_register_all;
-// count: 55
-
-// libavresample [1.0.1]
-typedef AVAudioResampleContext* (APIENTRYP AVRESAMPLE_ALLOC_CONTEXT)(void);  // 1.0.1
-typedef int (APIENTRYP AVRESAMPLE_OPEN)(AVAudioResampleContext *avr);  // 1.0.1
-typedef void (APIENTRYP AVRESAMPLE_CLOSE)(AVAudioResampleContext *avr);  // 1.0.1
-typedef void (APIENTRYP AVRESAMPLE_FREE)(AVAudioResampleContext **avr);  // 1.0.1
-typedef int (APIENTRYP AVRESAMPLE_CONVERT)(AVAudioResampleContext *avr, uint8_t **output,
-                      int out_plane_size, int out_samples, uint8_t **input,
-                      int in_plane_size, int in_samples);  // 1.0.1
-static AVRESAMPLE_ALLOC_CONTEXT sp_avresample_alloc_context;
-static AVRESAMPLE_OPEN sp_avresample_open;
-static AVRESAMPLE_CLOSE sp_avresample_close;
-static AVRESAMPLE_FREE sp_avresample_free;
-static AVRESAMPLE_CONVERT sp_avresample_convert;
-// count: 60
+// count: +1 = 54
 
 // libswresample [1...]
 typedef int (APIENTRYP AV_OPT_SET_SAMPLE_FMT)(void *obj, const char *name, enum AVSampleFormat fmt, int search_flags); // actually lavu .. but exist only w/ swresample!
@@ -198,7 +176,7 @@ static SWR_ALLOC sp_swr_alloc;
 static SWR_INIT sp_swr_init;
 static SWR_FREE sp_swr_free;
 static SWR_CONVERT sp_swr_convert;
-// count: 65
+// count: +5 = 59
 
 // We use JNI Monitor Locking, since this removes the need 
 // to statically link-in pthreads on window ..
@@ -222,7 +200,7 @@ static SWR_CONVERT sp_swr_convert;
     #define MY_MUTEX_UNLOCK(e,s)
 #endif
 
-#define SYMBOL_COUNT 65
+#define SYMBOL_COUNT 59
 
 JNIEXPORT jboolean JNICALL FF_FUNC(initSymbols0)
   (JNIEnv *env, jobject instance, jobject jmutex_avcodec_openclose, jobject jSymbols, jint count)
@@ -246,33 +224,29 @@ JNIEXPORT jboolean JNICALL FF_FUNC(initSymbols0)
     sp_avutil_version = (AVUTIL_VERSION) (intptr_t) symbols[i++];
     sp_avformat_version = (AVFORMAT_VERSION) (intptr_t) symbols[i++];
     sp_avcodec_version = (AVCODEC_VERSION) (intptr_t) symbols[i++];
-    sp_avresample_version = (AVRESAMPLE_VERSION) (intptr_t) symbols[i++];
+    sp_avdevice_version = (AVDEVICE_VERSION) (intptr_t) symbols[i++];
     sp_swresample_version = (SWRESAMPLE_VERSION) (intptr_t) symbols[i++];
 
-    sp_avcodec_register_all = (AVCODEC_REGISTER_ALL)  (intptr_t) symbols[i++];
     sp_avcodec_close = (AVCODEC_CLOSE)  (intptr_t) symbols[i++];
     sp_avcodec_string = (AVCODEC_STRING) (intptr_t) symbols[i++];
     sp_avcodec_find_decoder = (AVCODEC_FIND_DECODER) (intptr_t) symbols[i++];
+    sp_avcodec_alloc_context3 = (AVCODEC_ALLOC_CONTEXT3) (intptr_t) symbols[i++];
+    sp_avcodec_free_context = (AVCODEC_FREE_CONTEXT) (intptr_t) symbols[i++];
+    sp_avcodec_parameters_to_context = (AVCODEC_PARAMTERS_TO_CONTEXT) (intptr_t) symbols[i++];
     sp_avcodec_open2 = (AVCODEC_OPEN2) (intptr_t) symbols[i++];
-    sp_avcodec_alloc_frame = (AVCODEC_ALLOC_FRAME) (intptr_t) symbols[i++];
-    sp_avcodec_get_frame_defaults = (AVCODEC_GET_FRAME_DEFAULTS) (intptr_t) symbols[i++];
-    sp_avcodec_free_frame = (AVCODEC_FREE_FRAME) (intptr_t) symbols[i++];
-    sp_avcodec_default_get_buffer = (AVCODEC_DEFAULT_GET_BUFFER) (intptr_t) symbols[i++];
-    sp_avcodec_default_release_buffer = (AVCODEC_DEFAULT_RELEASE_BUFFER) (intptr_t) symbols[i++];
+    sp_av_frame_alloc = (AV_FRAME_ALLOC) (intptr_t) symbols[i++];
+    sp_av_free_frame = (AV_FREE_FRAME) (intptr_t) symbols[i++];
     sp_avcodec_default_get_buffer2 = (AVCODEC_DEFAULT_GET_BUFFER2) (intptr_t) symbols[i++];
-    sp_avcodec_get_edge_width = (AVCODEC_GET_EDGE_WIDTH) (intptr_t) symbols[i++];
     sp_av_image_fill_linesizes = (AV_IMAGE_FILL_LINESIZES) (intptr_t) symbols[i++];
-    sp_avcodec_align_dimensions = (AVCODEC_ALIGN_DIMENSIONS) (intptr_t) symbols[i++];
-    sp_avcodec_align_dimensions2 = (AVCODEC_ALIGN_DIMENSIONS2) (intptr_t) symbols[i++];
     sp_avcodec_flush_buffers = (AVCODEC_FLUSH_BUFFERS) (intptr_t) symbols[i++];
-    sp_av_init_packet = (AV_INIT_PACKET) (intptr_t) symbols[i++];
+    sp_av_packet_alloc = (AV_PACKET_ALLOC) (intptr_t) symbols[i++];
+    sp_av_packet_free = (AV_PACKET_FREE) (intptr_t) symbols[i++];
     sp_av_new_packet = (AV_NEW_PACKET) (intptr_t) symbols[i++];
-    sp_av_destruct_packet = (AV_DESTRUCT_PACKET) (intptr_t) symbols[i++];
-    sp_av_free_packet = (AV_FREE_PACKET) (intptr_t) symbols[i++];
-    sp_avcodec_decode_audio4 = (AVCODEC_DECODE_AUDIO4) (intptr_t) symbols[i++];
-    sp_avcodec_decode_video2 = (AVCODEC_DECODE_VIDEO2) (intptr_t) symbols[i++];
+    sp_av_packet_unref = (AV_PACKET_UNREF) (intptr_t) symbols[i++];
+    sp_avcodec_send_packet = (AVCODEC_SEND_PACKET) (intptr_t) symbols[i++];
+    sp_avcodec_receive_frame = (AVCODEC_RECEIVE_FRAME) (intptr_t) symbols[i++];
 
-    sp_av_pix_fmt_descriptors = (const AVPixFmtDescriptor*)  (intptr_t) symbols[i++];
+    sp_av_pix_fmt_desc_get = (AV_PIX_FMT_DESC_GET) (intptr_t) symbols[i++];
     sp_av_frame_unref = (AV_FRAME_UNREF) (intptr_t) symbols[i++];
     sp_av_realloc = (AV_REALLOC) (intptr_t) symbols[i++];
     sp_av_free = (AV_FREE) (intptr_t) symbols[i++];
@@ -284,11 +258,14 @@ JNIEXPORT jboolean JNICALL FF_FUNC(initSymbols0)
     sp_av_dict_count = (AV_DICT_COUNT) (intptr_t) symbols[i++];
     sp_av_dict_set = (AV_DICT_SET) (intptr_t) symbols[i++];
     sp_av_dict_free = (AV_DICT_FREE) (intptr_t) symbols[i++];
+    sp_av_channel_layout_default = (AV_CHANNEL_LAYOUT_DEFAULT) (intptr_t) symbols[i++];
+    sp_av_channel_layout_uninit = (AV_CHANNEL_LAYOUT_UNINIT) (intptr_t) symbols[i++];
+    sp_av_channel_layout_describe = (AV_CHANNEL_LAYOUT_DESCRIBE) (intptr_t) symbols[i++];
+    sp_av_opt_set_chlayout = (AV_OPT_SET_CHLAYOUT) (intptr_t) symbols[i++];
 
     sp_avformat_alloc_context = (AVFORMAT_ALLOC_CONTEXT) (intptr_t) symbols[i++];;
     sp_avformat_free_context = (AVFORMAT_FREE_CONTEXT) (intptr_t) symbols[i++];
     sp_avformat_close_input = (AVFORMAT_CLOSE_INPUT) (intptr_t) symbols[i++];
-    sp_av_register_all = (AV_REGISTER_ALL) (intptr_t) symbols[i++];
     sp_av_find_input_format = (AV_FIND_INPUT_FORMAT) (intptr_t) symbols[i++];
     sp_avformat_open_input = (AVFORMAT_OPEN_INPUT) (intptr_t) symbols[i++];
     sp_av_dump_format = (AV_DUMP_FORMAT) (intptr_t) symbols[i++];
@@ -303,12 +280,6 @@ JNIEXPORT jboolean JNICALL FF_FUNC(initSymbols0)
 
     sp_avdevice_register_all = (AVDEVICE_REGISTER_ALL) (intptr_t) symbols[i++];
 
-    sp_avresample_alloc_context = (AVRESAMPLE_ALLOC_CONTEXT) (intptr_t) symbols[i++];
-    sp_avresample_open = (AVRESAMPLE_OPEN) (intptr_t) symbols[i++];
-    sp_avresample_close = (AVRESAMPLE_CLOSE) (intptr_t) symbols[i++];
-    sp_avresample_free = (AVRESAMPLE_FREE) (intptr_t) symbols[i++];
-    sp_avresample_convert = (AVRESAMPLE_CONVERT) (intptr_t) symbols[i++];
-
     sp_av_opt_set_sample_fmt = (AV_OPT_SET_SAMPLE_FMT) (intptr_t) symbols[i++];
     sp_swr_alloc = (SWR_ALLOC) (intptr_t) symbols[i++];
     sp_swr_init = (SWR_INIT) (intptr_t) symbols[i++];
@@ -324,21 +295,23 @@ JNIEXPORT jboolean JNICALL FF_FUNC(initSymbols0)
         return JNI_FALSE;
     }
 
-    #if LIBAVCODEC_VERSION_MAJOR >= 55
-        if(!HAS_FUNC(sp_avcodec_default_get_buffer2) || 
-           !HAS_FUNC(sp_av_frame_unref) ) {
-            fprintf(stderr, "avcodec >= 55: avcodec_default_get_buffer2 %p, av_frame_unref %p\n", 
-                sp_avcodec_default_get_buffer2, sp_av_frame_unref);
-            return JNI_FALSE;
-        }
-    #else
-        if(!HAS_FUNC(sp_avcodec_default_get_buffer) || 
-           !HAS_FUNC(sp_avcodec_default_release_buffer)) {
-            fprintf(stderr, "avcodec < 55: avcodec_default_get_buffer %p, sp_avcodec_default_release_buffer %p\n", 
-                sp_avcodec_default_get_buffer2, sp_avcodec_default_release_buffer);
-            return JNI_FALSE;
-        }
-    #endif
+    if(!HAS_FUNC(sp_avcodec_default_get_buffer2) || 
+       !HAS_FUNC(sp_av_frame_unref) ) {
+        fprintf(stderr, "FFMPEGNatives.initSymbols0: avcodec >= 55: avcodec_default_get_buffer2 %p, av_frame_unref %p\n",
+            sp_avcodec_default_get_buffer2, sp_av_frame_unref);
+        return JNI_FALSE;
+    }
+
+#if LIBAVCODEC_VERSION_MAJOR >= 59
+    if( !HAS_FUNC(sp_av_channel_layout_default) ||
+        !HAS_FUNC(sp_av_channel_layout_uninit) ||
+        !HAS_FUNC(sp_av_channel_layout_describe) ||
+        !HAS_FUNC(sp_av_opt_set_chlayout)
+      ) {
+        fprintf(stderr, "FFMPEGNatives.initSymbols0: avcodec >= 59: av_channel_layout_* missing\n");
+        return JNI_FALSE;
+    }
+#endif
 
     #if defined (USE_PTHREAD_LOCKING)
         pthread_mutexattr_init(&renderLockAttr);
@@ -389,25 +362,12 @@ static void _setIsGLOriented(JNIEnv *env, FFMPEGToolBasicAV_t* pAV) {
 static void freeInstance(JNIEnv *env, FFMPEGToolBasicAV_t* pAV) {
     int i;
     if(NULL != pAV) {
-        // Close the A resampler
-        if( NULL != pAV->avResampleCtx ) {
-            sp_avresample_free(&pAV->avResampleCtx);
-            pAV->avResampleCtx = NULL;
-        }
-        if( NULL != pAV->swResampleCtx ) {
-            sp_swr_free(&pAV->swResampleCtx);
-            pAV->swResampleCtx = NULL;
-        }
-        if( NULL != pAV->aResampleBuffer ) {
-            sp_av_free(pAV->aResampleBuffer);
-            pAV->aResampleBuffer = NULL;
-        }
-
         MY_MUTEX_LOCK(env, mutex_avcodec_openclose);
         {
             // Close the V codec
             if(NULL != pAV->pVCodecCtx) {
                 sp_avcodec_close(pAV->pVCodecCtx);
+                sp_avcodec_free_context(&pAV->pVCodecCtx);
                 pAV->pVCodecCtx = NULL;
             }
             pAV->pVCodec=NULL;
@@ -415,20 +375,33 @@ static void freeInstance(JNIEnv *env, FFMPEGToolBasicAV_t* pAV) {
             // Close the A codec
             if(NULL != pAV->pACodecCtx) {
                 sp_avcodec_close(pAV->pACodecCtx);
+                sp_avcodec_free_context(&pAV->pACodecCtx);
                 pAV->pACodecCtx = NULL;
             }
             pAV->pACodec=NULL;
+
+            // Close the video file
+            if(NULL != pAV->pFormatCtx) {
+                sp_avformat_close_input(&pAV->pFormatCtx);
+                sp_avformat_free_context(pAV->pFormatCtx);
+                pAV->pFormatCtx = NULL;
+            }
         }
         MY_MUTEX_UNLOCK(env, mutex_avcodec_openclose);
 
+        // Close the A resampler
+        if( NULL != pAV->swResampleCtx ) {
+            sp_swr_free(&pAV->swResampleCtx);
+            pAV->swResampleCtx = NULL;
+        }
+        if( NULL != pAV->aResampleBuffer ) {
+            sp_av_free(pAV->aResampleBuffer);
+            pAV->aResampleBuffer = NULL;
+        }
+
         // Close the frames
         if(NULL != pAV->pVFrame) {
-            if(HAS_FUNC(sp_avcodec_free_frame)) {
-                sp_avcodec_free_frame(&pAV->pVFrame);
-            } else {
-                sp_av_free(pAV->pVFrame);
-            }
-            pAV->pVFrame = NULL;
+            sp_av_free_frame(&pAV->pVFrame);
         }
         if(NULL != pAV->pANIOBuffers) {
             for(i=0; i<pAV->aFrameCount; i++) {
@@ -446,33 +419,27 @@ static void freeInstance(JNIEnv *env, FFMPEGToolBasicAV_t* pAV) {
         }
         if(NULL != pAV->pAFrames) {
             for(i=0; i<pAV->aFrameCount; i++) {
-                if(HAS_FUNC(sp_avcodec_free_frame)) {
-                    sp_avcodec_free_frame(&pAV->pAFrames[i]);
-                } else {
-                    sp_av_free(pAV->pAFrames[i]);
-                }
+                sp_av_free_frame(&pAV->pAFrames[i]);
             }
             free(pAV->pAFrames);
             pAV->pAFrames = NULL;
         }
 
-        // Close the video file
-        if(NULL != pAV->pFormatCtx) {
-            sp_avformat_close_input(&pAV->pFormatCtx);
-            // Only for output files!
-            // sp_avformat_free_context(pAV->pFormatCtx);
-            pAV->pFormatCtx = NULL;
-        }
         if( NULL != pAV->ffmpegMediaPlayer ) {
             (*env)->DeleteGlobalRef(env, pAV->ffmpegMediaPlayer);
             pAV->ffmpegMediaPlayer = NULL;
         }
 
+        if( NULL != pAV->packet ) {
+            sp_av_packet_free(&pAV->packet);
+            pAV->packet = NULL;
+        }
+
         free(pAV);
     }
 }
 
-static int my_getPlaneCount(AVPixFmtDescriptor *pDesc) {
+static int my_getPlaneCount(const AVPixFmtDescriptor *pDesc) {
     int i, p=-1;
     for(i=pDesc->nb_components-1; i>=0; i--) {
         int p0 = pDesc->comp[i].plane;
@@ -524,9 +491,9 @@ JNIEXPORT jint JNICALL FF_FUNC(getAvCodecMajorVersionCC0)
     return (jint) LIBAVCODEC_VERSION_MAJOR;
 }
 
-JNIEXPORT jint JNICALL FF_FUNC(getAvResampleMajorVersionCC0)
+JNIEXPORT jint JNICALL FF_FUNC(getAvDeviceMajorVersionCC0)
   (JNIEnv *env, jobject instance) {
-    return (jint) LIBAVRESAMPLE_VERSION_MAJOR;
+    return (jint) LIBAVDEVICE_VERSION_MAJOR;
 }
 
 JNIEXPORT jint JNICALL FF_FUNC(getSwResampleMajorVersionCC0)
@@ -535,8 +502,7 @@ JNIEXPORT jint JNICALL FF_FUNC(getSwResampleMajorVersionCC0)
 }
 
 JNIEXPORT jlong JNICALL FF_FUNC(createInstance0)
-  (JNIEnv *env, jobject instance, jobject ffmpegMediaPlayer,
-   jboolean enableAvResample, jboolean enableSwResample, jboolean verbose)
+  (JNIEnv *env, jobject instance, jobject ffmpegMediaPlayer, jboolean verbose)
 {
     FFMPEGToolBasicAV_t * pAV = calloc(1, sizeof(FFMPEGToolBasicAV_t));
     if(NULL==pAV) {
@@ -546,26 +512,23 @@ JNIEXPORT jlong JNICALL FF_FUNC(createInstance0)
     pAV->avcodecVersion = sp_avcodec_version();
     pAV->avformatVersion = sp_avformat_version(); 
     pAV->avutilVersion = sp_avutil_version();
-    if(HAS_FUNC(sp_avresample_version) && enableAvResample) {
-        pAV->avresampleVersion = sp_avresample_version();
+    if( HAS_FUNC(sp_avdevice_version) ) {
+        pAV->avdeviceVersion = sp_avdevice_version();
     } else {
-        pAV->avresampleVersion = 0;
+        pAV->avdeviceVersion = 0;
     }
-    if(HAS_FUNC(sp_swresample_version) && enableSwResample) {
+    if( HAS_FUNC(sp_swresample_version) ) {
         pAV->swresampleVersion = sp_swresample_version();
     } else {
         pAV->swresampleVersion = 0;
     }
 
-    #if LIBAVCODEC_VERSION_MAJOR >= 55
-        // TODO: We keep code on using 1 a/v frame per decoding cycle now.
-        //       This is compatible w/ OpenAL's alBufferData(..)
-        //       and w/ OpenGL's texture update command, both copy data immediatly.
-        // pAV->useRefCountedFrames = AV_HAS_API_REFCOUNTED_FRAMES(pAV);
-        pAV->useRefCountedFrames = 0;
-    #else
-        pAV->useRefCountedFrames = 0;
-    #endif
+    // NOTE: We keep code on using 1 a/v frame per decoding cycle now.
+    //       This is compatible w/ OpenAL's alBufferData(..)
+    //       and w/ OpenGL's texture update command, both copy data immediately.
+    //
+    // NOTE: ffmpeg using `avcodec_receive_frame()` always uses `refcounted_frames`, i.e. always true now!
+    // pAV->useRefCountedFrames = 1;
 
     pAV->ffmpegMediaPlayer = (*env)->NewGlobalRef(env, ffmpegMediaPlayer);
     pAV->verbose = verbose;
@@ -573,8 +536,8 @@ JNIEXPORT jlong JNICALL FF_FUNC(createInstance0)
     pAV->aid=AV_STREAM_ID_AUTO;
 
     if(pAV->verbose) {
-        fprintf(stderr, "Info: Has avresample %d, swresample %d, device %d, refCount %d\n", 
-            AV_HAS_API_AVRESAMPLE(pAV), AV_HAS_API_SWRESAMPLE(pAV), HAS_FUNC(sp_avdevice_register_all), pAV->useRefCountedFrames);
+        fprintf(stderr, "Info: Has swresample %d, device %d\n",
+                AV_HAS_API_SWRESAMPLE(pAV), HAS_FUNC(sp_avdevice_register_all));
     }
     return (jlong) (intptr_t) pAV;
 }
@@ -589,6 +552,7 @@ JNIEXPORT void JNICALL FF_FUNC(destroyInstance0)
   }
 }
 
+#if LIBAVCODEC_VERSION_MAJOR < 59
 static uint64_t getDefaultAudioChannelLayout(int channelCount) {
     switch(channelCount) {
         case 1: return AV_CH_LAYOUT_MONO;
@@ -602,6 +566,24 @@ static uint64_t getDefaultAudioChannelLayout(int channelCount) {
         default: return AV_CH_LAYOUT_NATIVE;
     }
 }
+#else
+static void getDefaultAVChannelLayout(AVChannelLayout* cl, int channelCount) {
+    sp_av_channel_layout_uninit(cl);
+    switch(channelCount) {
+        case 1: *cl = (AVChannelLayout)AV_CHANNEL_LAYOUT_MONO; break;
+        case 2: *cl = (AVChannelLayout)AV_CHANNEL_LAYOUT_STEREO; break;
+        case 3: *cl = (AVChannelLayout)AV_CHANNEL_LAYOUT_SURROUND; break;
+        case 4: *cl = (AVChannelLayout)AV_CHANNEL_LAYOUT_QUAD; break;
+        case 5: *cl = (AVChannelLayout)AV_CHANNEL_LAYOUT_5POINT0; break;
+        case 6: *cl = (AVChannelLayout)AV_CHANNEL_LAYOUT_5POINT1; break;
+        case 7: *cl = (AVChannelLayout)AV_CHANNEL_LAYOUT_6POINT1; break;
+        case 8: *cl = (AVChannelLayout)AV_CHANNEL_LAYOUT_7POINT1; break;
+        default: {
+            sp_av_channel_layout_default(cl, channelCount);
+        }
+    }
+}
+#endif
 
 static void initPTSStats(PTSStats *ptsStats);
 static int64_t evalPTS(PTSStats *ptsStats, int64_t inPTS, int64_t inDTS);
@@ -675,6 +657,16 @@ static void getAlignedLinesizes(AVCodecContext *avctx, int linesize[/*4*/]) {
 }
 #endif
 
+#if LIBAVCODEC_VERSION_MAJOR < 60
+static int64_t getFrameNum(const AVCodecContext *avctx) {
+    return (int64_t)avctx->frame_number;
+}
+#else
+static int64_t getFrameNum(const AVCodecContext *avctx) {
+    return avctx->frame_num;
+}
+#endif
+
 JNIEXPORT void JNICALL FF_FUNC(setStream0)
   (JNIEnv *env, jobject instance, jlong ptr, jstring jURL, jboolean jIsCameraInput, 
    jint vid, jstring jSizeS, jint vWidth, jint vHeight, jint vRate,
@@ -691,16 +683,19 @@ JNIEXPORT void JNICALL FF_FUNC(setStream0)
     }
 
     // Register all formats and codecs
-    sp_avcodec_register_all();
     if( jIsCameraInput && HAS_FUNC(sp_avdevice_register_all) ) {
         sp_avdevice_register_all();
     }
-    sp_av_register_all();
     // Network too ..
     if(HAS_FUNC(sp_avformat_network_init)) {
         sp_avformat_network_init();
     }
 
+    pAV->packet = sp_av_packet_alloc();
+    if( NULL == pAV->packet ) {
+        JoglCommon_throwNewRuntimeException(env, "Couldn't allocate AVPacket");
+        return;
+    }
     pAV->pFormatCtx = sp_avformat_alloc_context();
 
     const char *urlPath = (*env)->GetStringUTFChars(env, jURL, &iscopy);
@@ -812,14 +807,14 @@ JNIEXPORT void JNICALL FF_FUNC(setStream0)
     for(i=0; ( AV_STREAM_ID_AUTO==pAV->aid || AV_STREAM_ID_AUTO==pAV->vid ) && i<pAV->pFormatCtx->nb_streams; i++) {
         AVStream *st = pAV->pFormatCtx->streams[i];
         if(pAV->verbose) {
-            fprintf(stderr, "Stream: %d: is-video %d, is-audio %d\n", i, (AVMEDIA_TYPE_VIDEO == st->codec->codec_type), AVMEDIA_TYPE_AUDIO == st->codec->codec_type);
+            fprintf(stderr, "Stream: %d: is-video %d, is-audio %d\n", i, (AVMEDIA_TYPE_VIDEO == st->codecpar->codec_type), AVMEDIA_TYPE_AUDIO == st->codecpar->codec_type);
         }
-        if(AVMEDIA_TYPE_VIDEO == st->codec->codec_type) {
+        if(AVMEDIA_TYPE_VIDEO == st->codecpar->codec_type) {
             if(AV_STREAM_ID_AUTO==pAV->vid && (AV_STREAM_ID_AUTO==vid || vid == i) ) {
                 pAV->pVStream = st;
                 pAV->vid=i;
             }
-        } else if(AVMEDIA_TYPE_AUDIO == st->codec->codec_type) {
+        } else if(AVMEDIA_TYPE_AUDIO == st->codecpar->codec_type) {
             if(AV_STREAM_ID_AUTO==pAV->aid && (AV_STREAM_ID_AUTO==aid || aid == i) ) {
                 pAV->pAStream = st;
                 pAV->aid=i;
@@ -838,7 +833,7 @@ JNIEXPORT void JNICALL FF_FUNC(setStream0)
     }
 
     if(0<=pAV->aid) {
-        AVFrame * pAFrame0 = sp_avcodec_alloc_frame();
+        AVFrame * pAFrame0 = sp_av_frame_alloc();
         if( NULL == pAFrame0 ) {
             JoglCommon_throwNewRuntimeException(env, "Couldn't alloc 1st audio frame\n");
             return;
@@ -846,11 +841,30 @@ JNIEXPORT void JNICALL FF_FUNC(setStream0)
 
         // Get a pointer to the codec context for the audio stream
         // FIXME: Libav Binary compatibility! JAU01
-        pAV->pACodecCtx=pAV->pAStream->codec;
+        pAV->pACodecPar=pAV->pAStream->codecpar;
 
         // FIXME: Libav Binary compatibility! JAU01
-        if (pAV->pACodecCtx->bit_rate) {
-            pAV->bps_audio = pAV->pACodecCtx->bit_rate;
+        if (pAV->pACodecPar->bit_rate) {
+            pAV->bps_audio = pAV->pACodecPar->bit_rate;
+        }
+
+        // Find the decoder for the audio stream
+        pAV->pACodec=sp_avcodec_find_decoder(pAV->pACodecPar->codec_id);
+        if(pAV->pACodec==NULL) {
+            JoglCommon_throwNewRuntimeException(env, "Couldn't find audio codec for codec_id %d", pAV->pACodecPar->codec_id);
+            return;
+        }
+
+        // Allocate the decoder context for the audio stream
+        pAV->pACodecCtx = sp_avcodec_alloc_context3(pAV->pACodec);
+        if(pAV->pACodecCtx==NULL) {
+            JoglCommon_throwNewRuntimeException(env, "Couldn't allocate audio decoder context for codec_id %d", pAV->pACodecPar->codec_id);
+            return;
+        }
+        res = sp_avcodec_parameters_to_context(pAV->pACodecCtx, pAV->pACodecPar);
+        if(res<0) {
+            JoglCommon_throwNewRuntimeException(env, "Couldn't copy audio codec-par to context");
+            return;
         }
 
         // Customize ..
@@ -864,30 +878,13 @@ JNIEXPORT void JNICALL FF_FUNC(setStream0)
         // Note: OpenAL well supports n-channel by now (SOFT),
         //       however - AFAIK AV_SAMPLE_FMT_S16 would allow no conversion!
         pAV->pACodecCtx->request_sample_fmt=AV_SAMPLE_FMT_S16;
-        if( 1 <= aMaxChannelCount && aMaxChannelCount <= 2 ) {
-            pAV->pACodecCtx->request_channel_layout=getDefaultAudioChannelLayout(aMaxChannelCount);
-            #if LIBAVCODEC_VERSION_MAJOR < 54
-                /** Until 55.0.0, but stopped working w/ 54 already :( */
-                pAV->pACodecCtx->request_channels=aMaxChannelCount;
-            #endif
-        }
         pAV->pACodecCtx->skip_frame=AVDISCARD_DEFAULT;
 
         sp_avcodec_string(pAV->acodec, sizeof(pAV->acodec), pAV->pACodecCtx, 0);
 
-        // Find the decoder for the audio stream
-        pAV->pACodec=sp_avcodec_find_decoder(pAV->pACodecCtx->codec_id);
-        if(pAV->pACodec==NULL) {
-            JoglCommon_throwNewRuntimeException(env, "Couldn't find audio codec %d, %s", pAV->pACodecCtx->codec_id, pAV->acodec);
-            return;
-        }
-
         // Open codec
         MY_MUTEX_LOCK(env, mutex_avcodec_openclose);
         {
-            #if LIBAVCODEC_VERSION_MAJOR >= 55
-                pAV->pACodecCtx->refcounted_frames = pAV->useRefCountedFrames;
-            #endif
             res = sp_avcodec_open2(pAV->pACodecCtx, pAV->pACodec, NULL);
         }
         MY_MUTEX_UNLOCK(env, mutex_avcodec_openclose);
@@ -895,32 +892,44 @@ JNIEXPORT void JNICALL FF_FUNC(setStream0)
             JoglCommon_throwNewRuntimeException(env, "Couldn't open audio codec %d, %s", pAV->pACodecCtx->codec_id, pAV->acodec);
             return;
         }
-        if (!pAV->pACodecCtx->channel_layout) {
-            pAV->pACodecCtx->channel_layout = getDefaultAudioChannelLayout(pAV->pACodecCtx->channels);
+        // try to shape audio channel-layout on fixed audio channel-count
+#if LIBAVCODEC_VERSION_MAJOR < 59
+        pAV->aChannels = pAV->pACodecCtx->channels;
+        if ( !pAV->pACodecCtx->channel_layout ) {
+            const uint64_t cl = getDefaultAudioChannelLayout(pAV->aChannels);
+            if ( !cl ) {
+                JoglCommon_throwNewRuntimeException(env, "Couldn't determine channel layout of %d channels\n", pAV->aChannels);
+                return;
+            }
+            pAV->pACodecCtx->channel_layout = cl;
         }
-        if (!pAV->pACodecCtx->channel_layout) {
-            JoglCommon_throwNewRuntimeException(env, "Couldn't determine channel layout of %d channels\n", pAV->pACodecCtx->channels);
-            return;
+        if( pAV->verbose ) {
+            fprintf(stderr, "A channels %d, layout 0x%"PRIx64"\n",
+                    pAV->aChannels, pAV->pACodecCtx->channel_layout);
+        }
+#else
+        pAV->aChannels = pAV->pACodecCtx->ch_layout.nb_channels;
+        if ( pAV->pACodecCtx->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC ) {
+            getDefaultAVChannelLayout(&pAV->pACodecCtx->ch_layout, pAV->aChannels);
+        }
+        if( pAV->verbose ) {
+            char buf[256];
+            sp_av_channel_layout_describe(&pAV->pACodecCtx->ch_layout, buf, sizeof(buf));
+            fprintf(stderr, "A channels %d, layout %s\n", pAV->aChannels, buf);
         }
+#endif
         pAV->aSampleRate = pAV->pACodecCtx->sample_rate;
-        pAV->aChannels = pAV->pACodecCtx->channels;
         pAV->aFrameSize = pAV->pACodecCtx->frame_size; // in samples per channel!
         pAV->aSampleFmt = pAV->pACodecCtx->sample_fmt;
         pAV->frames_audio = pAV->pAStream->nb_frames;
         pAV->aSinkSupport = _isAudioFormatSupported(env, pAV->ffmpegMediaPlayer, pAV->aSampleFmt, pAV->aSampleRate, pAV->aChannels);
         if( pAV->verbose ) {
-            fprintf(stderr, "A channels %d [l %"PRId64"], sample_rate %d, frame_size %d, frame_number %d, [afps %f, cfps %f, sfps %f], nb_frames %"PRId64", [maxChan %d, prefRate %d, req_chan_layout %"PRId64", req_chan %d], sink-support %d \n", 
-                pAV->aChannels, pAV->pACodecCtx->channel_layout, pAV->aSampleRate, pAV->aFrameSize, pAV->pACodecCtx->frame_number,
+            fprintf(stderr, "A sample_rate %d, frame_size %d, frame_number %"PRId64", [afps %f, sfps %f], nb_frames %"PRId64", [maxChan %d, prefRate %d], sink-support %d \n",
+                pAV->aSampleRate, pAV->aFrameSize, getFrameNum(pAV->pACodecCtx),
                 my_av_q2f(pAV->pAStream->avg_frame_rate),
-                my_av_q2f_r(pAV->pAStream->codec->time_base),
                 my_av_q2f_r(pAV->pAStream->time_base),
                 pAV->pAStream->nb_frames,
-                aMaxChannelCount, aPrefSampleRate, pAV->pACodecCtx->request_channel_layout,
-                #if LIBAVCODEC_VERSION_MAJOR < 54
-                    pAV->pACodecCtx->request_channels,
-                #else
-                    0,
-                #endif
+                aMaxChannelCount, aPrefSampleRate,
                 pAV->aSinkSupport);
         }
 
@@ -929,11 +938,12 @@ JNIEXPORT void JNICALL FF_FUNC(setStream0)
         pAV->aChannelsOut = pAV->aChannels;
         pAV->aSampleRateOut = pAV->aSampleRate;
 
-        if( ( AV_HAS_API_AVRESAMPLE(pAV) || AV_HAS_API_SWRESAMPLE(pAV) ) && 
+        if( ( AV_HAS_API_SWRESAMPLE(pAV) ) && 
             ( pAV->aSampleFmt != AV_SAMPLE_FMT_S16 || 
             ( 0 != aPrefSampleRate && pAV->aSampleRate != aPrefSampleRate ) || 
-              !pAV->aSinkSupport ) ) {
-
+              !pAV->aSinkSupport ) )
+        {
+            const int32_t maxOutChannelCount = MIN_INT(aMaxChannelCount, MAX_INT(1, pAV->aChannels));
             if( 0 == aPrefSampleRate ) {
                 aPrefSampleRate = pAV->aSampleRate;
             }
@@ -941,65 +951,64 @@ JNIEXPORT void JNICALL FF_FUNC(setStream0)
             enum AVSampleFormat aSampleFmtOut = AV_SAMPLE_FMT_S16;
             int32_t aChannelsOut;
             int32_t aSampleRateOut;
-            int32_t minChannelCount = MIN_INT(aMaxChannelCount,pAV->pACodecCtx->channels);
             
-            if( _isAudioFormatSupported(env, pAV->ffmpegMediaPlayer, aSampleFmtOut, aPrefSampleRate, pAV->pACodecCtx->channels) ) {
-                aChannelsOut = pAV->pACodecCtx->channels;
+            if( _isAudioFormatSupported(env, pAV->ffmpegMediaPlayer, aSampleFmtOut, aPrefSampleRate, pAV->aChannels) ) {
+                aChannelsOut = pAV->aChannels;
                 aSampleRateOut = aPrefSampleRate;
                 aSinkSupport = 1;
-            } else if( _isAudioFormatSupported(env, pAV->ffmpegMediaPlayer, aSampleFmtOut, aPrefSampleRate, minChannelCount) ) {
-                aChannelsOut = minChannelCount;
+            } else if( _isAudioFormatSupported(env, pAV->ffmpegMediaPlayer, aSampleFmtOut, aPrefSampleRate, maxOutChannelCount) ) {
+                aChannelsOut = maxOutChannelCount;
                 aSampleRateOut = aPrefSampleRate;
                 aSinkSupport = 1;
             }
 
-            if( aSinkSupport ) {
-                if( AV_HAS_API_AVRESAMPLE(pAV) ) {
-                    pAV->avResampleCtx = sp_avresample_alloc_context();
-                    sp_av_opt_set_int(pAV->avResampleCtx, "in_channel_layout",  pAV->pACodecCtx->channel_layout,            0);
-                    sp_av_opt_set_int(pAV->avResampleCtx, "out_channel_layout", getDefaultAudioChannelLayout(aChannelsOut), 0);
-                    sp_av_opt_set_int(pAV->avResampleCtx, "in_sample_rate",     pAV->aSampleRate,                           0);
-                    sp_av_opt_set_int(pAV->avResampleCtx, "out_sample_rate",    aSampleRateOut,                             0);
-                    sp_av_opt_set_int(pAV->avResampleCtx, "in_sample_fmt",      pAV->aSampleFmt,                            0);
-                    sp_av_opt_set_int(pAV->avResampleCtx, "out_sample_fmt",     aSampleFmtOut,                              0);
-
-                    if ( sp_avresample_open(pAV->avResampleCtx) < 0 ) {
-                        sp_avresample_free(&pAV->avResampleCtx);
-                        pAV->avResampleCtx = NULL;
-                        fprintf(stderr, "error initializing avresample ctx\n");
-                    } else {
-                        // OK
-                        pAV->aSampleFmtOut = aSampleFmtOut;
-                        pAV->aChannelsOut = aChannelsOut;
-                        pAV->aSampleRateOut = aSampleRateOut;
-                        pAV->aSinkSupport = 1;
-                    }
-                } else if( AV_HAS_API_SWRESAMPLE(pAV) ) {
-                    pAV->swResampleCtx = sp_swr_alloc();
-                    sp_av_opt_set_int(pAV->swResampleCtx,        "in_channel_layout",  pAV->pACodecCtx->channel_layout,            0);
-                    sp_av_opt_set_int(pAV->swResampleCtx,        "out_channel_layout", getDefaultAudioChannelLayout(aChannelsOut), 0);
-                    sp_av_opt_set_int(pAV->swResampleCtx,        "in_sample_rate",     pAV->aSampleRate,                           0);
-                    sp_av_opt_set_int(pAV->swResampleCtx,        "out_sample_rate",    aSampleRateOut,                             0);
-                    sp_av_opt_set_sample_fmt(pAV->swResampleCtx, "in_sample_fmt",      pAV->aSampleFmt,                            0);
-                    sp_av_opt_set_sample_fmt(pAV->swResampleCtx, "out_sample_fmt",     aSampleFmtOut,                              0);
-
-                    if ( sp_swr_init(pAV->swResampleCtx) < 0 ) {
-                        sp_swr_free(&pAV->swResampleCtx);
-                        pAV->swResampleCtx = NULL;
-                        fprintf(stderr, "error initializing swresample ctx\n");
-                    } else {
-                        // OK
-                        pAV->aSampleFmtOut = aSampleFmtOut;
-                        pAV->aChannelsOut = aChannelsOut;
-                        pAV->aSampleRateOut = aSampleRateOut;
-                        pAV->aSinkSupport = 1;
-                    }
+            if( aSinkSupport && AV_HAS_API_SWRESAMPLE(pAV) ) {
+                pAV->swResampleCtx = sp_swr_alloc();
+#if LIBAVCODEC_VERSION_MAJOR < 59
+                const int64_t out_channel_layout = getDefaultAudioChannelLayout(aChannelsOut);
+                sp_av_opt_set_int(pAV->swResampleCtx,        "in_channel_layout",  pAV->pACodecCtx->channel_layout,            0);
+                sp_av_opt_set_int(pAV->swResampleCtx,        "out_channel_layout", out_channel_layout, 0);
+                if( pAV->verbose ) {
+                    fprintf(stderr, "A Resample: channels %d -> %d, layout 0x%"PRIx64" -> 0x%"PRIx64", rate %d -> %d, fmt 0x%x -> 0x%x\n",
+                            pAV->aChannels, aChannelsOut, pAV->pACodecCtx->channel_layout, out_channel_layout,
+                            pAV->aSampleRate, aSampleRateOut, (int)pAV->aSampleFmt, (int)aSampleFmtOut);
+                }
+#else
+                AVChannelLayout out_ch_layout = {0};
+                getDefaultAVChannelLayout(&out_ch_layout, aChannelsOut);
+                sp_av_opt_set_chlayout(pAV->swResampleCtx,        "in_chlayout",  &pAV->pACodecCtx->ch_layout,                 0);
+                sp_av_opt_set_chlayout(pAV->swResampleCtx,        "out_chlayout", &out_ch_layout,                              0);
+                if( pAV->verbose ) {
+                    char buf1[256], buf2[256];
+                    sp_av_channel_layout_describe(&pAV->pACodecCtx->ch_layout, buf1, sizeof(buf1));
+                    sp_av_channel_layout_describe(&out_ch_layout, buf2, sizeof(buf2));
+                    fprintf(stderr, "A Resample: channels %d -> %d, layout %s -> %s, rate %d -> %d, fmt 0x%x -> 0x%x\n",
+                            pAV->aChannels, aChannelsOut, buf1, buf2,
+                            pAV->aSampleRate, aSampleRateOut, (int)pAV->aSampleFmt, (int)aSampleFmtOut);
+                }
+                sp_av_channel_layout_uninit(&out_ch_layout);
+#endif
+                sp_av_opt_set_int(pAV->swResampleCtx,        "in_sample_rate",     pAV->aSampleRate,                           0);
+                sp_av_opt_set_int(pAV->swResampleCtx,        "out_sample_rate",    aSampleRateOut,                             0);
+                sp_av_opt_set_sample_fmt(pAV->swResampleCtx, "in_sample_fmt",      pAV->aSampleFmt,                            0);
+                sp_av_opt_set_sample_fmt(pAV->swResampleCtx, "out_sample_fmt",     aSampleFmtOut,                              0);
+
+                if ( sp_swr_init(pAV->swResampleCtx) < 0 ) {
+                    sp_swr_free(&pAV->swResampleCtx);
+                    pAV->swResampleCtx = NULL;
+                    fprintf(stderr, "error initializing swresample ctx\n");
+                } else {
+                    // OK
+                    pAV->aSampleFmtOut = aSampleFmtOut;
+                    pAV->aChannelsOut = aChannelsOut;
+                    pAV->aSampleRateOut = aSampleRateOut;
+                    pAV->aSinkSupport = 1;
                 }
             }
         }
         if(pAV->verbose) {
-            fprintf(stderr, "Info: Need resample %d, Use avresample %d, swresample %d\n", 
-                pAV->aSinkSupport, NULL!=pAV->avResampleCtx, NULL!=pAV->swResampleCtx);
+            fprintf(stderr, "Info: Need resample %d, Use swresample %d\n", 
+                pAV->aSinkSupport, NULL!=pAV->swResampleCtx);
         }
 
         // Allocate audio frames
@@ -1009,7 +1018,7 @@ JNIEXPORT void JNICALL FF_FUNC(setStream0)
         pAV->pAFrames = calloc(pAV->aFrameCount, sizeof(AVFrame*));
         pAV->pAFrames[0] = pAFrame0;
         for(i=1; i<pAV->aFrameCount; i++) {
-            pAV->pAFrames[i] = sp_avcodec_alloc_frame();
+            pAV->pAFrames[i] = sp_av_frame_alloc();
             if( NULL == pAV->pAFrames[i] ) {
                 JoglCommon_throwNewRuntimeException(env, "Couldn't alloc audio frame %d / %d", i, pAV->aFrameCount);
                 return;
@@ -1021,16 +1030,34 @@ JNIEXPORT void JNICALL FF_FUNC(setStream0)
     if(0<=pAV->vid) {
         // Get a pointer to the codec context for the video stream
         // FIXME: Libav Binary compatibility! JAU01
-        pAV->pVCodecCtx=pAV->pVStream->codec;
+        pAV->pVCodecPar = pAV->pVStream->codecpar;
         #if 0
         pAV->pVCodecCtx->get_format = my_get_format;
         #endif
 
-        if (pAV->pVCodecCtx->bit_rate) {
+        if (pAV->pVCodecPar->bit_rate) {
             // FIXME: Libav Binary compatibility! JAU01
-            pAV->bps_video = pAV->pVCodecCtx->bit_rate;
+            pAV->bps_video = pAV->pVCodecPar->bit_rate;
+        }
+
+        // Find the decoder for the video stream
+        pAV->pVCodec=sp_avcodec_find_decoder(pAV->pVCodecPar->codec_id);
+        if(pAV->pVCodec==NULL) {
+            JoglCommon_throwNewRuntimeException(env, "Couldn't find video codec for codec_id %d", pAV->pVCodecPar->codec_id);
+            return;
         }
 
+        // Allocate the decoder context for the video stream
+        pAV->pVCodecCtx = sp_avcodec_alloc_context3(pAV->pVCodec);
+        if(pAV->pVCodecCtx==NULL) {
+            JoglCommon_throwNewRuntimeException(env, "Couldn't allocate video decoder context for codec_id %d", pAV->pVCodecPar->codec_id);
+            return;
+        }
+        res = sp_avcodec_parameters_to_context(pAV->pVCodecCtx, pAV->pVCodecPar);
+        if(res<0) {
+            JoglCommon_throwNewRuntimeException(env, "Couldn't copy video codec-par to context");
+            return;
+        }
         // Customize ..
         // pAV->pVCodecCtx->thread_count=2;
         // pAV->pVCodecCtx->thread_type=FF_THREAD_FRAME|FF_THREAD_SLICE; // Decode more than one frame at once
@@ -1041,19 +1068,9 @@ JNIEXPORT void JNICALL FF_FUNC(setStream0)
 
         sp_avcodec_string(pAV->vcodec, sizeof(pAV->vcodec), pAV->pVCodecCtx, 0);
 
-        // Find the decoder for the video stream
-        pAV->pVCodec=sp_avcodec_find_decoder(pAV->pVCodecCtx->codec_id);
-        if(pAV->pVCodec==NULL) {
-            JoglCommon_throwNewRuntimeException(env, "Couldn't find video codec %d, %s", pAV->pVCodecCtx->codec_id, pAV->vcodec);
-            return;
-        }
-
         // Open codec
         MY_MUTEX_LOCK(env, mutex_avcodec_openclose);
         {
-            #if LIBAVCODEC_VERSION_MAJOR >= 55
-                pAV->pVCodecCtx->refcounted_frames = pAV->useRefCountedFrames;
-            #endif
             res = sp_avcodec_open2(pAV->pVCodecCtx, pAV->pVCodec, NULL);
         }
         MY_MUTEX_UNLOCK(env, mutex_avcodec_openclose);
@@ -1070,12 +1087,6 @@ JNIEXPORT void JNICALL FF_FUNC(setStream0)
         // FIXME: Libav Binary compatibility! JAU01
         if( pAV->pVStream->avg_frame_rate.den && pAV->pVStream->avg_frame_rate.num ) {
             pAV->fps = my_av_q2f(pAV->pVStream->avg_frame_rate);
-        #if LIBAVCODEC_VERSION_MAJOR < 55
-        } else if( pAV->pVStream->r_frame_rate.den && pAV->pVStream->r_frame_rate.num ) {
-            pAV->fps = my_av_q2f(pAV->pVStream->r_frame_rate);
-        #endif
-        } else if( pAV->pVStream->codec->time_base.den && pAV->pVStream->codec->time_base.num ) {
-            pAV->fps = my_av_q2f_r(pAV->pVStream->codec->time_base);
         } else if( pAV->pVStream->time_base.den && pAV->pVStream->time_base.num ) {
             pAV->fps = my_av_q2f_r(pAV->pVStream->time_base);
         } else {
@@ -1087,31 +1098,30 @@ JNIEXPORT void JNICALL FF_FUNC(setStream0)
         // FIXME: Libav Binary compatibility! JAU01
         pAV->vWidth = pAV->pVCodecCtx->width;
         pAV->vHeight = pAV->pVCodecCtx->height;
-        pAV->vPixFmt = pAV->pVCodecCtx->pix_fmt;
+        pAV->vPixFmt = pAV->pVCodecCtx->pix_fmt; // AV_PIX_FMT_NONE
         pAV->vFlipped = JNI_FALSE;
         {   
-            AVPixFmtDescriptor pixDesc = sp_av_pix_fmt_descriptors[pAV->vPixFmt];
-            pAV->vBitsPerPixel = sp_av_get_bits_per_pixel(&pixDesc);
-            pAV->vBufferPlanes = my_getPlaneCount(&pixDesc);
+            const AVPixFmtDescriptor* pixDesc = sp_av_pix_fmt_desc_get(pAV->vPixFmt);
+            if( NULL != pixDesc ) {
+                pAV->vBitsPerPixel = sp_av_get_bits_per_pixel(pixDesc);
+                pAV->vBufferPlanes = my_getPlaneCount(pixDesc);
+            } else {
+                JoglCommon_throwNewRuntimeException(env, "Couldn't query AVPixFmtDescriptor from v-ctx pix_fmt 0x%x", (int)pAV->vPixFmt);
+                return;
+            }
         }
 
         if( pAV->verbose ) {
-            fprintf(stderr, "V frame_size %d, frame_number %d, [afps %f, rfps %f, cfps %f, sfps %f] -> %f fps, nb_frames %"PRId64", size %dx%d, fmt 0x%X, bpp %d, planes %d, codecCaps 0x%X\n", 
-                pAV->pVCodecCtx->frame_size, pAV->pVCodecCtx->frame_number, 
+            fprintf(stderr, "V frame_size %d, frame_number %"PRId64", [afps %f, sfps %f] -> %f fps, nb_frames %"PRId64", size %dx%d, fmt 0x%X, bpp %d, planes %d, codecCaps 0x%X\n",
+                pAV->pVCodecCtx->frame_size, getFrameNum(pAV->pVCodecCtx),
                 my_av_q2f(pAV->pVStream->avg_frame_rate),
-                #if LIBAVCODEC_VERSION_MAJOR < 55
-                    my_av_q2f(pAV->pVStream->r_frame_rate),
-                #else
-                    0.0f,
-                #endif
-                my_av_q2f_r(pAV->pVStream->codec->time_base),
                 my_av_q2f_r(pAV->pVStream->time_base),
                 pAV->fps,
                 pAV->pVStream->nb_frames,
                 pAV->vWidth, pAV->vHeight, pAV->vPixFmt, pAV->vBitsPerPixel, pAV->vBufferPlanes, pAV->pVCodecCtx->codec->capabilities);
         }
 
-        pAV->pVFrame=sp_avcodec_alloc_frame();
+        pAV->pVFrame=sp_av_frame_alloc();
         if( pAV->pVFrame == NULL ) {
             JoglCommon_throwNewRuntimeException(env, "Couldn't alloc video frame");
             return;
@@ -1120,11 +1130,7 @@ JNIEXPORT void JNICALL FF_FUNC(setStream0)
         pAV->pVFrame->width = pAV->pVCodecCtx->width;
         pAV->pVFrame->height = pAV->pVCodecCtx->height;
         pAV->pVFrame->format = pAV->pVCodecCtx->pix_fmt;
-        #if LIBAVCODEC_VERSION_MAJOR >= 55
-            res = sp_avcodec_default_get_buffer2(pAV->pVCodecCtx, pAV->pVFrame, 0);
-        #else
-            res = sp_avcodec_default_get_buffer(pAV->pVCodecCtx, pAV->pVFrame);
-        #endif
+        res = sp_avcodec_default_get_buffer2(pAV->pVCodecCtx, pAV->pVFrame, 0);
         if(0!=res) {
             JoglCommon_throwNewRuntimeException(env, "Couldn't peek video buffer dimension");
             return;
@@ -1138,23 +1144,15 @@ JNIEXPORT void JNICALL FF_FUNC(setStream0)
             }
             int32_t vLinesize[4];
             if( pAV->vBufferPlanes > 1 ) {
-                #if 0
-                    getAlignedLinesizes(pAV->pVCodecCtx, vLinesize);
-                    for(i=0; i<pAV->vBufferPlanes; i++) {
-                        // FIXME: Libav Binary compatibility! JAU01
-                        pAV->vTexWidth[i] = vLinesize[i] / pAV->vBytesPerPixelPerPlane ;
-                    }
-                #else
-                    for(i=0; i<pAV->vBufferPlanes; i++) {
-                        // FIXME: Libav Binary compatibility! JAU01
-                        vLinesize[i] = pAV->pVFrame->linesize[i];
-                        pAV->vTexWidth[i] = vLinesize[i] / pAV->vBytesPerPixelPerPlane ;
-                    }
-                #endif
+                for(i=0; i<pAV->vBufferPlanes; i++) {
+                    // FIXME: Libav Binary compatibility! JAU01
+                    vLinesize[i] = pAV->pVFrame->linesize[i];
+                    pAV->vTexWidth[i] = vLinesize[i] / pAV->vBytesPerPixelPerPlane ;
+                }
             } else {
                 vLinesize[0] = pAV->pVFrame->linesize[0];
-                if( pAV->vPixFmt == PIX_FMT_YUYV422 || 
-                    pAV->vPixFmt == PIX_FMT_UYVY422 ) 
+                if( pAV->vPixFmt == AV_PIX_FMT_YUYV422 || 
+                    pAV->vPixFmt == AV_PIX_FMT_UYVY422 ) 
                 {
                     // Stuff 2x 16bpp (YUYV, UYVY) into one RGBA pixel!
                     pAV->vTexWidth[0] = pAV->pVCodecCtx->width / 2;
@@ -1168,11 +1166,7 @@ JNIEXPORT void JNICALL FF_FUNC(setStream0)
                 }
             }
         }
-        #if LIBAVCODEC_VERSION_MAJOR >= 55
-            sp_av_frame_unref(pAV->pVFrame);
-        #else
-            sp_avcodec_default_release_buffer(pAV->pVCodecCtx, pAV->pVFrame);
-        #endif
+        sp_av_frame_unref(pAV->pVFrame);
     }
     pAV->vPTS=0;
     pAV->aPTS=0;
@@ -1204,18 +1198,11 @@ JNIEXPORT jint JNICALL FF_FUNC(readNextPacket0)
 {
     FFMPEGToolBasicAV_t *pAV = (FFMPEGToolBasicAV_t *)((void *)((intptr_t)ptr));
 
-    AVPacket packet;
     jint resPTS = INVALID_PTS;
     uint8_t * pkt_odata;
     int pkt_osize;
 
-    packet.data = NULL; // minimum
-    packet.size = 0;    // requirement
-    sp_av_init_packet(&packet);
-
-    const int avRes = sp_av_read_frame(pAV->pFormatCtx, &packet);
-    pkt_odata = packet.data;
-    pkt_osize = packet.size;
+    const int avRes = sp_av_read_frame(pAV->pFormatCtx, pAV->packet);
     if( AVERROR_EOF == avRes || ( pAV->pFormatCtx->pb && pAV->pFormatCtx->pb->eof_reached ) ) {
         if( pAV->verbose ) {
             fprintf(stderr, "EOS: avRes[res %d, eos %d], pb-EOS %d\n", 
@@ -1225,44 +1212,64 @@ JNIEXPORT jint JNICALL FF_FUNC(readNextPacket0)
         resPTS = END_OF_STREAM_PTS;
     } else if( 0 <= avRes ) {
         if( pAV->verbose ) {
-            fprintf(stderr, "P: ptr %p, size %d\n", packet.data, packet.size);
+            fprintf(stderr, "P: ptr %p, size %d\n", pAV->packet->data, pAV->packet->size);
         }
-        if(packet.stream_index==pAV->aid) {
+        int send_pkt = 1; // only send pkt once
+        if(pAV->packet->stream_index==pAV->aid) {
             // Decode audio frame
             if(NULL == pAV->pAFrames) { // no audio registered
-                sp_av_free_packet(&packet);
+                sp_av_packet_unref(pAV->packet);
                 return INVALID_PTS;
             }
-            int frameCount;
-            int flush_complete = 0;
-            for ( frameCount=0; 0 < packet.size || 0 == frameCount; frameCount++ ) {
-                int frameDecoded;
-                int len1;
+            int res = 0;
+            for (int frameCount=0; 0 <= res || 0 == frameCount; ++frameCount) {
                 AVFrame* pAFrameCurrent = pAV->pAFrames[pAV->aFrameCurrent];
-                if( pAV->useRefCountedFrames ) {
-                    sp_av_frame_unref(pAFrameCurrent);
-                    pAV->aFrameCurrent = ( pAV->aFrameCurrent + 1 ) % pAV->aFrameCount ;
-                }
-                sp_avcodec_get_frame_defaults(pAFrameCurrent);
-
-                if (flush_complete) {
-                    break;
-                }
-                len1 = sp_avcodec_decode_audio4(pAV->pACodecCtx, pAFrameCurrent, &frameDecoded, &packet);
-                if (len1 < 0) {
-                    // if error, we skip the frame 
-                    packet.size = 0;
-                    break;
+                sp_av_frame_unref(pAFrameCurrent);
+                pAV->aFrameCurrent = ( pAV->aFrameCurrent + 1 ) % pAV->aFrameCount ;
+
+                if( 0 < send_pkt ) { // only send pkt once
+                    send_pkt = 0;
+                    res = sp_avcodec_send_packet(pAV->pACodecCtx, pAV->packet);
+                    if ( AVERROR(EAGAIN) == res ) {
+                        // input is not accepted in the current state - user must read output
+                        res = 0; // continue draining frames
+                        if( pAV->verbose ) {
+                            fprintf(stderr, "A-P: EAGAIN @ %d\n", frameCount);
+                        }
+                    } else if ( AVERROR_EOF == res ) {
+                        // the decoder has been flushed, and no new packets can be sent to it
+                        res = 0; // continue draining frames
+                        if( pAV->verbose ) {
+                            fprintf(stderr, "A-P: EOF @ %d\n", frameCount);
+                        }
+                    } else if ( 0 > res ) {
+                        res = 0; // error, but continue draining frames
+                        if( pAV->verbose ) {
+                            fprintf(stderr, "A-P: ERROR %d @ %d\n", res, frameCount);
+                        }
+                    }
                 }
-                packet.data += len1;
-                packet.size -= len1;
-
-                if (!frameDecoded) {
-                    // stop sending empty packets if the decoder is finished 
-                    if (!packet.data && pAV->pACodecCtx->codec->capabilities & CODEC_CAP_DELAY) {
-                        flush_complete = 1;
+                res = sp_avcodec_receive_frame(pAV->pACodecCtx, pAFrameCurrent);
+                if( 0 > res ) {
+                    if ( AVERROR(EAGAIN) == res ) {
+                        // output is not available in this state - user must try to send new input
+                        res = 0;
+                        if( 0 == frameCount && pAV->verbose ) {
+                            fprintf(stderr, "A-F: EAGAIN @ %d\n", frameCount); // drained at start
+                        } // else expected to be drained
+                    } else if ( AVERROR_EOF == res ) {
+                        // the decoder has been fully flushed
+                        res = 0;
+                        if( pAV->verbose ) {
+                            fprintf(stderr, "A-F: EOF @ %d\n", frameCount);
+                        }
+                    } else {
+                        if( pAV->verbose ) {
+                            fprintf(stderr, "A-F: ERROR %d @ %d\n", res, frameCount);
+                        }
                     }
-                    continue;
+                    break; // end loop
                 }
 
                 int32_t data_size = 0;
@@ -1279,7 +1286,7 @@ JNIEXPORT jint JNICALL FF_FUNC(readNextPacket0)
                 #endif
 
                 const AVRational time_base = pAV->pAStream->time_base;
-                const int64_t pkt_pts = pAFrameCurrent->pkt_pts;
+                const int64_t pkt_pts = pAFrameCurrent->pts;
                 if( 0 == frameCount && AV_NOPTS_VALUE != pkt_pts ) { // 1st frame only, discard invalid PTS ..
                     pAV->aPTS = my_av_q2i32( pkt_pts * 1000, time_base);
                 } else { // subsequent frames or invalid PTS ..
@@ -1289,14 +1296,14 @@ JNIEXPORT jint JNICALL FF_FUNC(readNextPacket0)
                 if( pAV->verbose ) {
                     int32_t aDTS = my_av_q2i32( pAFrameCurrent->pkt_dts * 1000, time_base);
 
-                    fprintf(stderr, "A pts %d [pkt_pts %"PRId64"], dts %d [pkt_dts %"PRId64"], f# %d, aFrame %d/%d %p, dataPtr %p, dataSize %d\n", 
+                    fprintf(stderr, "A pts %d [pkt_pts %"PRId64"], dts %d [pkt_dts %"PRId64"], f# %d, aFrame %d/%d %p, dataPtr %p, dataSize %d\n",
                         pAV->aPTS, pkt_pts, aDTS, pAFrameCurrent->pkt_dts, frameCount,
                         pAV->aFrameCurrent, pAV->aFrameCount, pAFrameCurrent, pAFrameCurrent->data[0], data_size);
                 }
                 if( NULL != env ) {
                     void* data_ptr = pAFrameCurrent->data[0]; // default
 
-                    if( NULL != pAV->avResampleCtx || NULL != pAV->swResampleCtx ) {
+                    if( NULL != pAV->swResampleCtx ) {
                         uint8_t *tmp_out;
                         int out_samples=-1, out_size, out_linesize;
                         int osize      = sp_av_get_bytes_per_sample( pAV->aSampleFmtOut );
@@ -1314,14 +1321,7 @@ JNIEXPORT jint JNICALL FF_FUNC(readNextPacket0)
                         }
                         pAV->aResampleBuffer = tmp_out;
 
-                        if( NULL != pAV->avResampleCtx ) {
-                            out_samples = sp_avresample_convert(pAV->avResampleCtx,
-                                                                &pAV->aResampleBuffer,
-                                                                out_linesize, nb_samples,
-                                                                pAFrameCurrent->data,
-                                                                pAFrameCurrent->linesize[0],
-                                                                pAFrameCurrent->nb_samples);
-                        } else if( NULL != pAV->swResampleCtx ) {
+                        if( NULL != pAV->swResampleCtx ) {
                             out_samples =  sp_swr_convert(pAV->swResampleCtx, 
                                                           &pAV->aResampleBuffer, nb_samples,
                                                           (const uint8_t **)pAFrameCurrent->data, pAFrameCurrent->nb_samples);
@@ -1357,41 +1357,63 @@ JNIEXPORT jint JNICALL FF_FUNC(readNextPacket0)
                     (*env)->CallVoidMethod(env, pAV->ffmpegMediaPlayer, ffmpeg_jni_mid_pushSound, pNIOBufferCurrent->nioRef, data_size, pAV->aPTS);
                 }
             }
-        } else if(packet.stream_index==pAV->vid) {
+        } else if(pAV->packet->stream_index==pAV->vid) {
             // Decode video frame
             if(NULL == pAV->pVFrame) {
-                sp_av_free_packet(&packet);
+                sp_av_packet_unref(pAV->packet);
                 return INVALID_PTS;
             }
-            int frameCount;
-            int flush_complete = 0;
-            for ( frameCount=0; 0 < packet.size || 0 == frameCount; frameCount++ ) {
-                int frameDecoded;
-                int len1;
-                sp_avcodec_get_frame_defaults(pAV->pVFrame);
-                if (flush_complete) {
-                    break;
-                }
-                len1 = sp_avcodec_decode_video2(pAV->pVCodecCtx, pAV->pVFrame, &frameDecoded, &packet);
-                if (len1 < 0) {
-                    // if error, we skip the frame
-                    packet.size = 0;
-                    break;
+            int res = 0;
+            for (int frameCount=0; 0 <= res || 0 == frameCount; ++frameCount) {
+                sp_av_frame_unref(pAV->pVFrame);
+
+                if( 0 < send_pkt ) { // only send pkt once
+                    send_pkt = 0;
+                    res = sp_avcodec_send_packet(pAV->pVCodecCtx, pAV->packet);
+                    if ( AVERROR(EAGAIN) == res ) {
+                        // input is not accepted in the current state - user must read output
+                        res = 0; // continue draining frames
+                        if( pAV->verbose ) {
+                            fprintf(stderr, "V-P: EAGAIN @ %d\n", frameCount);
+                        }
+                    } else if ( AVERROR_EOF == res ) {
+                        // the decoder has been flushed, and no new packets can be sent to it
+                        res = 0; // continue draining frames
+                        if( pAV->verbose ) {
+                            fprintf(stderr, "V-P: EOF @ %d\n", frameCount);
+                        }
+                    } else if ( 0 > res ) {
+                        res = 0; // error, but continue draining frames
+                        if( pAV->verbose ) {
+                            fprintf(stderr, "V-P: ERROR %d @ %d\n", res, frameCount);
+                        }
+                    }
                 }
-                packet.data += len1;
-                packet.size -= len1;
-
-                if (!frameDecoded) {
-                    // stop sending empty packets if the decoder is finished
-                    if (!packet.data && pAV->pVCodecCtx->codec->capabilities & CODEC_CAP_DELAY) {
-                        flush_complete = 1;
+                res = sp_avcodec_receive_frame(pAV->pVCodecCtx, pAV->pVFrame);
+                if( 0 > res ) {
+                    if ( AVERROR(EAGAIN) == res ) {
+                        // output is not available in this state - user must try to send new input
+                        res = 0;
+                        if( 0 == frameCount && pAV->verbose ) {
+                            fprintf(stderr, "V-F: EAGAIN @ %d\n", frameCount); // drained at start
+                        } // else expected to be drained
+                    } else if ( AVERROR_EOF == res ) {
+                        // the decoder has been fully flushed
+                        res = 0;
+                        if( pAV->verbose ) {
+                            fprintf(stderr, "V-F: EOF @ %d\n", frameCount);
+                        }
+                    } else {
+                        if( pAV->verbose ) {
+                            fprintf(stderr, "V-F: ERROR %d @ %d\n", res, frameCount);
+                        }
                     }
-                    continue;
+                    break; // end loop
                 }
 
                 // FIXME: Libav Binary compatibility! JAU01
                 const AVRational time_base = pAV->pVStream->time_base;
-                const int64_t pkt_pts = pAV->pVFrame->pkt_pts;
+                const int64_t pkt_pts = pAV->pVFrame->pts;
                 const int64_t pkt_dts = pAV->pVFrame->pkt_dts;
                 const int64_t fix_pts = evalPTS(&pAV->vPTSStats, pkt_pts, pkt_dts);
                 if( AV_NOPTS_VALUE != fix_pts ) { // discard invalid PTS ..
@@ -1409,17 +1431,15 @@ JNIEXPORT jint JNICALL FF_FUNC(readNextPacket0)
 
                     const char * warn = frame_repeat_i > 0 ? "REPEAT" : "NORMAL" ;
 
-                    fprintf(stderr, "V fix_pts %d, pts %d [pkt_pts %"PRId64"], dts %d [pkt_dts %"PRId64"], time d(%lf s + r %lf = %lf s), i(%d ms + r %d = %d ms) - %s - f# %d, dec %d, data %p, lsz %d\n",
+                    fprintf(stderr, "V fix_pts %d, pts %d [pkt_pts %"PRId64"], dts %d [pkt_dts %"PRId64"], time d(%lf s + r %lf = %lf s), i(%d ms + r %d = %d ms) - %s - f# %d, data %p, lsz %d\n",
                             pAV->vPTS, vPTS, pkt_pts, vDTS, pkt_dts, 
                             frame_delay_d, frame_repeat_d, (frame_delay_d + frame_repeat_d),
                             frame_delay_i, frame_repeat_i, (frame_delay_i + frame_repeat_i), warn, frameCount,
-                            len1, pAV->pVFrame->data[0], pAV->pVFrame->linesize[0]);
+                            pAV->pVFrame->data[0], pAV->pVFrame->linesize[0]);
                     // fflush(NULL);
                 }
                 if( 0 == pAV->pVFrame->linesize[0] ) {
-                    if( pAV->useRefCountedFrames ) {
-                        sp_av_frame_unref(pAV->pVFrame);
-                    }
+                    sp_av_frame_unref(pAV->pVFrame);
                     continue;
                 }
                 resPTS = pAV->vPTS; // Video Frame!
@@ -1458,7 +1478,7 @@ JNIEXPORT jint JNICALL FF_FUNC(readNextPacket0)
                                         texFmt, texType, pAV->pVFrame->data[0] + p_offset[0]);
                 DBG_TEXSUBIMG2D_b(pAV);
 
-                if( pAV->vPixFmt == PIX_FMT_YUV420P || pAV->vPixFmt == PIX_FMT_YUVJ420P ) {
+                if( pAV->vPixFmt == AV_PIX_FMT_YUV420P || pAV->vPixFmt == AV_PIX_FMT_YUVJ420P ) {
                     // U plane
                     // FIXME: Libav Binary compatibility! JAU01
                     DBG_TEXSUBIMG2D_a('U',pAV,1,1,2,1);
@@ -1475,7 +1495,7 @@ JNIEXPORT jint JNICALL FF_FUNC(readNextPacket0)
                                             pAV->vTexWidth[2],      pAV->pVCodecCtx->height/2, 
                                             texFmt, texType, pAV->pVFrame->data[2] + p_offset[2]);
                     DBG_TEXSUBIMG2D_b(pAV);
-                } else if( pAV->vPixFmt == PIX_FMT_YUV422P || pAV->vPixFmt == PIX_FMT_YUVJ422P ) {
+                } else if( pAV->vPixFmt == AV_PIX_FMT_YUV422P || pAV->vPixFmt == AV_PIX_FMT_YUVJ422P ) {
                     // U plane
                     // FIXME: Libav Binary compatibility! JAU01
                     DBG_TEXSUBIMG2D_a('U',pAV,1,1,1,1);
@@ -1496,15 +1516,11 @@ JNIEXPORT jint JNICALL FF_FUNC(readNextPacket0)
 
                 pAV->procAddrGLFinish();
                 //pAV->procAddrGLFlush();
-                if( pAV->useRefCountedFrames ) {
-                    sp_av_frame_unref(pAV->pVFrame);
-                }
+                sp_av_frame_unref(pAV->pVFrame);
             }
         }
         // restore orig pointer and size values, we may have moved along within packet
-        packet.data = pkt_odata;
-        packet.size = pkt_osize;
-        sp_av_free_packet(&packet);
+        sp_av_packet_unref(pAV->packet);
     }
     return resPTS;
 }
@@ -1560,12 +1576,12 @@ JNIEXPORT jint JNICALL FF_FUNC(seek0)
         pos0 = pAV->vPTS;
         streamID = pAV->vid;
         time_base = pAV->pVStream->time_base;
-        pts0 = pAV->pVFrame->pkt_pts;
+        pts0 = pAV->pVFrame->pts;
     } else if( pAV->aid >= 0 ) {
         pos0 = pAV->aPTS;
         streamID = pAV->aid;
         time_base = pAV->pAStream->time_base;
-        pts0 = pAV->pAFrames[pAV->aFrameCurrent]->pkt_pts;
+        pts0 = pAV->pAFrames[pAV->aFrameCurrent]->pts;
     } else {
         return pAV->vPTS;
     }
@@ -1600,7 +1616,7 @@ JNIEXPORT jint JNICALL FF_FUNC(seek0)
     if(NULL != pAV->pACodecCtx) {
         sp_avcodec_flush_buffers( pAV->pACodecCtx );
     }
-    const jint rPTS =  my_av_q2i32( ( pAV->vid >= 0 ? pAV->pVFrame->pkt_pts : pAV->pAFrames[pAV->aFrameCurrent]->pkt_pts ) * 1000, time_base);
+    const jint rPTS =  my_av_q2i32( ( pAV->vid >= 0 ? pAV->pVFrame->pts : pAV->pAFrames[pAV->aFrameCurrent]->pts ) * 1000, time_base);
     if(pAV->verbose) {
         fprintf(stderr, "SEEK: post : res %d, u %d\n", res, rPTS);
     }
-- 
cgit v1.2.3