diff --git a/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/Converters.cs b/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/Converters.cs index c6c6c0c..edcc58d 100644 --- a/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/Converters.cs +++ b/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/Converters.cs @@ -22,6 +22,34 @@ using System.Windows.Data; namespace QuickLook.Plugin.VideoViewer { + public sealed class TimeSpanToSecondsConverter : DependencyObject, IValueConverter + { + public object Convert(object value, Type targetType, object parameter, CultureInfo culture) + { + if (value is TimeSpan span) + return span.TotalSeconds; + if (value is Duration duration) + return duration.HasTimeSpan ? duration.TimeSpan.TotalSeconds : 0d; + + return 0d; + } + + object IValueConverter.ConvertBack(object value, Type targetType, object parameter, CultureInfo culture) + { + var span = TimeSpan.Zero; + + if (value != null) + span = TimeSpan.FromSeconds((double) value); + + if (targetType == typeof(TimeSpan)) + return span; + if (targetType == typeof(Duration)) + return new Duration(span); + + return Activator.CreateInstance(targetType); + } + } + public sealed class TimeSpanToShortStringConverter : DependencyObject, IValueConverter { public object Convert(object value, Type targetType, object parameter, CultureInfo culture) @@ -29,13 +57,17 @@ namespace QuickLook.Plugin.VideoViewer if (value == null) return "00:00"; - var v = (TimeSpan) value; + var span = TimeSpan.Zero; + if (value is Duration duration) + span = duration.HasTimeSpan ? 
duration.TimeSpan : TimeSpan.Zero; + if (value is TimeSpan timespan) + span = timespan; var s = string.Empty; - if (v.Hours > 0) - s += $"{v.Hours:D2}:"; + if (span.Hours > 0) + s += $"{span.Hours:D2}:"; - s += $"{v.Minutes:D2}:{v.Seconds:D2}"; + s += $"{span.Minutes:D2}:{span.Seconds:D2}"; return s; } @@ -55,13 +87,9 @@ namespace QuickLook.Plugin.VideoViewer if (value == null) return Volumes[0]; - var v = (int) value; - if (v == 0) - return Volumes[0]; + var v = (int) Math.Min(100, Math.Max((double) value * 100, 0)); - v = Math.Min(v, 100); - - return Volumes[1 + v / 34]; + return v == 0 ? Volumes[0] : Volumes[1 + v / 34]; } public object ConvertBack(object value, Type targetType, object parameter, CultureInfo culture) diff --git a/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/Meta.Vlc.Wpf.XML b/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/Meta.Vlc.Wpf.XML deleted file mode 100644 index 3c5d9cb..0000000 --- a/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/Meta.Vlc.Wpf.XML +++ /dev/null @@ -1,1242 +0,0 @@ - - - - Meta.Vlc.Wpf - - - - - The manager of LibVlc api. - - - - - Release VLC instance. - - - - - The path of LibVlc dlls. - - - - - The options when initialize LibVlc. - - - - - The list of VLC. - - - - - Default VLC instance. - - - - - The state of VLC initialization. - - - - - Initialize the VLC with path of LibVlc. - - - - - - Initialize the VLC with path of LibVlc and options. - - - - - - - Pixel chroma type. - - - - - 5 bit for each RGB channel, no alpha channel, BGRA5550(15bit / pixel). - - - - - 5 bit Red, 6 bit Green and 5 bit Blue, no alpha channel, BGRA5650(16bit / pixel). - - - - - 8 bit for each RGB channel, no alpha channel, BGRA8880(24bit / pixel). - - - - - 8 bit per RGB channel and 8 bit unused, no alpha channel, BGRA8880(32bit / pixel). - - - - - 8 bit for each BGRA channel, RGBA8888(32bit / pixel). - - - - - 12 bits per pixel planar format with Y plane followed by V and U planes. - - - - - Same as YV12 but V and U are swapped. 
- - - - - 12 bits per pixel planar format with Y plane and interleaved UV plane. - - - - - 16 bits per pixel packed YUYV array. - - - - - 16 bits per pixel packed UYVY array. - - - - - Same as I420, mainly used with MJPG codecs. - - - - - Same as YUY2, mainly used with MJPG codecs. - - - - - Define the behavior when media is ended. - - - - - Do nothing, player's state is Ended, you need stop the player to play current media again. - - - - - Stop the player. - - - - - Play current media again. - - - - - Default behavior, same as Stop. - - - - - Some extension method. - - - - - Return the value from the selector, unless the object is null. Then return the default value. - - - - - - - - - - - Combine path to src path. - - - - - - - - Check a path is a drive root directory or not. - - - - - - - Convert a path to uri. - - - - - - - Format a path with '/' and ends with '/'. - - - - - - - Quickly async invoke a action. - - - - - - Indicates that the value of the marked element could be null sometimes, - so the check for null is necessary before its usage. - - - - [CanBeNull] public object Test() { return null; } - public void UseTest() { - var p = Test(); - var s = p.ToString(); // Warning: Possible 'System.NullReferenceException' - } - - - - - - Indicates that the value of the marked element could never be null. - - - - [NotNull] public object Foo() { - return null; // Warning: Possible 'null' assignment - } - - - - - - Can be appplied to symbols of types derived from IEnumerable as well as to symbols of Task - and Lazy classes to indicate that the value of a collection item, of the Task.Result property - or of the Lazy.Value property can never be null. - - - - - Can be appplied to symbols of types derived from IEnumerable as well as to symbols of Task - and Lazy classes to indicate that the value of a collection item, of the Task.Result property - or of the Lazy.Value property can be null. 
- - - - - Indicates that the marked method builds string by format pattern and (optional) arguments. - Parameter, which contains format string, should be given in constructor. The format string - should be in -like form. - - - - [StringFormatMethod("message")] - public void ShowError(string message, params object[] args) { /* do something */ } - public void Foo() { - ShowError("Failed: {0}"); // Warning: Non-existing argument in format string - } - - - - - - Specifies which parameter of an annotated method should be treated as format-string - - - - - For a parameter that is expected to be one of the limited set of values. - Specify fields of which type should be used as values for this parameter. - - - - - Indicates that the function argument should be string literal and match one - of the parameters of the caller function. For example, ReSharper annotates - the parameter of . - - - - public void Foo(string param) { - if (param == null) - throw new ArgumentNullException("par"); // Warning: Cannot resolve symbol - } - - - - - - Indicates that the method is contained in a type that implements - System.ComponentModel.INotifyPropertyChanged interface and this method - is used to notify that some property value changed. - - - The method should be non-static and conform to one of the supported signatures: - - - NotifyChanged(string) - - - NotifyChanged(params string[]) - - - NotifyChanged{T}(Expression{Func{T}}) - - - NotifyChanged{T,U}(Expression{Func{T,U}}) - - - SetProperty{T}(ref T, T, string) - - - - - - public class Foo : INotifyPropertyChanged { - public event PropertyChangedEventHandler PropertyChanged; - [NotifyPropertyChangedInvocator] - protected virtual void NotifyChanged(string propertyName) { ... 
} - - private string _name; - public string Name { - get { return _name; } - set { _name = value; NotifyChanged("LastName"); /* Warning */ } - } - } - - Examples of generated notifications: - - - NotifyChanged("Property") - - - NotifyChanged(() => Property) - - - NotifyChanged((VM x) => x.Property) - - - SetProperty(ref myField, value, "Property") - - - - - - - Describes dependency between method input and output. - - -

Function Definition Table syntax:

- - FDT ::= FDTRow [;FDTRow]* - FDTRow ::= Input => Output | Output <= Input - Input ::= ParameterName: Value [, Input]* - Output ::= [ParameterName: Value]* {halt|stop|void|nothing|Value} - Value ::= true | false | null | notnull | canbenull - - If method has single input parameter, it's name could be omitted.
- Using halt (or void/nothing, which is the same) - for method output means that the methos doesn't return normally.
- canbenull annotation is only applicable for output parameters.
- You can use multiple [ContractAnnotation] for each FDT row, - or use single attribute with rows separated by semicolon.
-
- - - - - [ContractAnnotation("=> halt")] - public void TerminationMethod() - - - - - [ContractAnnotation("halt <= condition: false")] - public void Assert(bool condition, string text) // regular assertion method - - - - - [ContractAnnotation("s:null => true")] - public bool IsNullOrEmpty(string s) // string.IsNullOrEmpty() - - - - - // A method that returns null if the parameter is null, - // and not null if the parameter is not null - [ContractAnnotation("null => null; notnull => notnull")] - public object Transform(object data) - - - - - [ContractAnnotation("s:null=>false; =>true,result:notnull; =>false, result:null")] - public bool TryParse(string s, out Person result) - - - - -
- - - Indicates that marked element should be localized or not. - - - - [LocalizationRequiredAttribute(true)] - public class Foo { - private string str = "my string"; // Warning: Localizable string - } - - - - - - Indicates that the value of the marked type (or its derivatives) - cannot be compared using '==' or '!=' operators and Equals() - should be used instead. However, using '==' or '!=' for comparison - with null is always permitted. - - - - [CannotApplyEqualityOperator] - class NoEquality { } - class UsesNoEquality { - public void Test() { - var ca1 = new NoEquality(); - var ca2 = new NoEquality(); - if (ca1 != null) { // OK - bool condition = ca1 == ca2; // Warning - } - } - } - - - - - - When applied to a target attribute, specifies a requirement for any type marked - with the target attribute to implement or inherit specific type or types. - - - - [BaseTypeRequired(typeof(IComponent)] // Specify requirement - public class ComponentAttribute : Attribute { } - [Component] // ComponentAttribute requires implementing IComponent interface - public class MyComponent : IComponent { } - - - - - - Indicates that the marked symbol is used implicitly (e.g. via reflection, in external library), - so this symbol will not be marked as unused (as well as by other usage inspections). - - - - - Should be used on attributes and causes ReSharper to not mark symbols marked with such attributes - as unused (as well as by other usage inspections) - - - - Only entity marked with attribute considered used. - - - Indicates implicit assignment to a member. - - - - Indicates implicit instantiation of a type with fixed constructor signature. - That means any unused constructor parameters won't be reported as such. - - - - Indicates implicit instantiation of a type. - - - - Specify what is considered used implicitly when marked - with or . - - - - Members of entity marked with attribute are considered used. - - - Entity marked with attribute and all its members considered used. 
- - - - This attribute is intended to mark publicly available API - which should not be removed and so is treated as used. - - - - - Tells code analysis engine if the parameter is completely handled when the invoked method is on stack. - If the parameter is a delegate, indicates that delegate is executed while the method is executed. - If the parameter is an enumerable, indicates that it is enumerated while the method is executed. - - - - - Indicates that a method does not make any observable state changes. - The same as System.Diagnostics.Contracts.PureAttribute. - - - - [Pure] private int Multiply(int x, int y) { return x * y; } - public void Foo() { - const int a = 2, b = 2; - Multiply(a, b); // Waring: Return value of pure method is not used - } - - - - - - Indicates that a parameter is a path to a file or a folder within a web project. - Path can be relative or absolute, starting from web root (~). - - - - - An extension method marked with this attribute is processed by ReSharper code completion - as a 'Source Template'. When extension method is completed over some expression, it's source code - is automatically expanded like a template at call site. - - - Template method body can contain valid source code and/or special comments starting with '$'. - Text inside these comments is added as source code when the template is applied. Template parameters - can be used either as additional method parameters or as identifiers wrapped in two '$' signs. - Use the attribute to specify macros for parameters. - - - In this example, the 'forEach' method is a source template available over all values - of enumerable types, producing ordinary C# 'foreach' statement and placing caret inside block: - - [SourceTemplate] - public static void forEach<T>(this IEnumerable<T> xs) { - foreach (var x in xs) { - //$ $END$ - } - } - - - - - - Allows specifying a macro for a parameter of a source template. 
- - - You can apply the attribute on the whole method or on any of its additional parameters. The macro expression - is defined in the property. When applied on a method, the target - template parameter is defined in the property. To apply the macro silently - for the parameter, set the property value = -1. - - - Applying the attribute on a source template method: - - [SourceTemplate, Macro(Target = "item", Expression = "suggestVariableName()")] - public static void forEach<T>(this IEnumerable<T> collection) { - foreach (var item in collection) { - //$ $END$ - } - } - - Applying the attribute on a template method parameter: - - [SourceTemplate] - public static void something(this Entity x, [Macro(Expression = "guid()", Editable = -1)] string newguid) { - /*$ var $x$Id = "$newguid$" + x.ToString(); - x.DoSomething($x$Id); */ - } - - - - - - Allows specifying a macro that will be executed for a source template - parameter when the template is expanded. - - - - - Allows specifying which occurrence of the target parameter becomes editable when the template is deployed. - - - If the target parameter is used several times in the template, only one occurrence becomes editable; - other occurrences are changed synchronously. To specify the zero-based index of the editable occurrence, - use values >= 0. To make the parameter non-editable when the template is expanded, use -1. - - > - - - - Identifies the target parameter of a source template if the - is applied on a template method. - - - - - ASP.NET MVC attribute. If applied to a parameter, indicates that the parameter - is an MVC action. If applied to a method, the MVC action name is calculated - implicitly from the context. Use this attribute for custom wrappers similar to - System.Web.Mvc.Html.ChildActionExtensions.RenderAction(HtmlHelper, String). - - - - - ASP.NET MVC attribute. Indicates that a parameter is an MVC area. 
- Use this attribute for custom wrappers similar to - System.Web.Mvc.Html.ChildActionExtensions.RenderAction(HtmlHelper, String). - - - - - ASP.NET MVC attribute. If applied to a parameter, indicates that the parameter is - an MVC controller. If applied to a method, the MVC controller name is calculated - implicitly from the context. Use this attribute for custom wrappers similar to - System.Web.Mvc.Html.ChildActionExtensions.RenderAction(HtmlHelper, String, String). - - - - - ASP.NET MVC attribute. Indicates that a parameter is an MVC Master. Use this attribute - for custom wrappers similar to System.Web.Mvc.Controller.View(String, String). - - - - - ASP.NET MVC attribute. Indicates that a parameter is an MVC model type. Use this attribute - for custom wrappers similar to System.Web.Mvc.Controller.View(String, Object). - - - - - ASP.NET MVC attribute. If applied to a parameter, indicates that the parameter is an MVC - partial view. If applied to a method, the MVC partial view name is calculated implicitly - from the context. Use this attribute for custom wrappers similar to - System.Web.Mvc.Html.RenderPartialExtensions.RenderPartial(HtmlHelper, String). - - - - - ASP.NET MVC attribute. Allows disabling inspections for MVC views within a class or a method. - - - - - ASP.NET MVC attribute. Indicates that a parameter is an MVC display template. - Use this attribute for custom wrappers similar to - System.Web.Mvc.Html.DisplayExtensions.DisplayForModel(HtmlHelper, String). - - - - - ASP.NET MVC attribute. Indicates that a parameter is an MVC editor template. - Use this attribute for custom wrappers similar to - System.Web.Mvc.Html.EditorExtensions.EditorForModel(HtmlHelper, String). - - - - - ASP.NET MVC attribute. Indicates that a parameter is an MVC template. - Use this attribute for custom wrappers similar to - System.ComponentModel.DataAnnotations.UIHintAttribute(System.String). - - - - - ASP.NET MVC attribute. 
If applied to a parameter, indicates that the parameter - is an MVC view. If applied to a method, the MVC view name is calculated implicitly - from the context. Use this attribute for custom wrappers similar to - System.Web.Mvc.Controller.View(Object). - - - - - ASP.NET MVC attribute. When applied to a parameter of an attribute, - indicates that this parameter is an MVC action name. - - - - [ActionName("Foo")] - public ActionResult Login(string returnUrl) { - ViewBag.ReturnUrl = Url.Action("Foo"); // OK - return RedirectToAction("Bar"); // Error: Cannot resolve action - } - - - - - - Razor attribute. Indicates that a parameter or a method is a Razor section. - Use this attribute for custom wrappers similar to - System.Web.WebPages.WebPageBase.RenderSection(String). - - - - - Indicates how method, constructor invocation or property access - over collection type affects content of the collection. - - - - Method does not use or modify content of the collection. - - - Method only reads content of the collection but does not modify it. - - - Method can change content of the collection but does not add new elements. - - - Method can add new elements to the collection. - - - - Indicates that the marked method is assertion method, i.e. it halts control flow if - one of the conditions is satisfied. To set the condition, mark one of the parameters with - attribute. - - - - - Indicates the condition parameter of the assertion method. The method itself should be - marked by attribute. The mandatory argument of - the attribute is the assertion type. - - - - - Specifies assertion type. If the assertion method argument satisfies the condition, - then the execution continues. Otherwise, execution is assumed to be halted. - - - - Marked parameter should be evaluated to true. - - - Marked parameter should be evaluated to false. - - - Marked parameter should be evaluated to null value. - - - Marked parameter should be evaluated to not null value. 
- - - - Indicates that the marked method unconditionally terminates control flow execution. - For example, it could unconditionally throw exception. - - - - - Indicates that method is pure LINQ method, with postponed enumeration (like Enumerable.Select, - .Where). This annotation allows inference of [InstantHandle] annotation for parameters - of delegate type by analyzing LINQ method chains. - - - - - Indicates that IEnumerable, passed as parameter, is not enumerated. - - - - - Indicates that parameter is regular expression pattern. - - - - - XAML attribute. Indicates the type that has ItemsSource property and should be treated - as ItemsControl-derived type, to enable inner items DataContext type resolve. - - - - - XAML attibute. Indicates the property of some BindingBase-derived type, that - is used to bind some item of ItemsControl-derived type. This annotation will - enable the DataContext type resolve for XAML bindings for such properties. - - - Property should have the tree ancestor of the ItemsControl type or - marked with the attribute. - - - - - Prevents the Member Reordering feature from tossing members of the marked class. - - - The attribute must be mentioned in your member reordering patterns - - - - - Context used to render video data. - - - - - The format of snapshot. - - - - - BMP - - - - - Jpeg - - - - - PNG - - - - - Aspect ratio enumeration. - - - - - Default aspect ratio. - - - - - 16:9 - - - - - 4:3 - - - - - 一个强类型的资源类,用于查找本地化的字符串等。 - - - - - 返回此类使用的缓存的 ResourceManager 实例。 - - - - - 使用此强类型资源类,为所有资源查找 - 重写当前线程的 CurrentUICulture 属性。 - - - - - VLC media player. - - - - - Create a for XAML, you should not use it to create player in your C# code, if you don't - want to add it to XAML visual three. use and to - instead. - - - - - Create a for C# code, if you want to display video with - , please set to true, player will - generate on thread of it. - - Do you want to display video with ? 
- - - - Create a for C# code, use player will generate on specified - thread. - - - The dispatcher of thread which you want to generate - on. - - - - - Initialize VLC player with path of LibVlc. - - - - - - Initialize VLC player with path of LibVlc and options. - - - - - - - Cleanup the player used resource. - - - - - - Cleanup the player used resource. - - - - - Load a media by file path. - - - - - - Load a media by uri. - - - - - - Load a media by file path and options. - - - - - - - Load a media by uri and options. - - - - - - - Play media. - - - - - Pause media. - - - - - Resume media. - - - - - Pause or resume media. - - - - - Replay media. - - - - - Stop media. - - - - - Add options to media. - - - - - - Show next frame. - - - - - Inactive with DVD menu. - - - - - - Toggle mute mode. - - - - - Take a snapshot. - - - - - - - - Take a snapshot. - - - - - - - Gets a list of potential audio output devices. - - - - - - Gets a list of audio output devices for a given audio output module. - - - - - - - Gets the list of available audio output modules. - - - - - - Selects an audio output module. - Any change will take be effect only after playback is stopped and restarted. Audio output cannot be changed while - playing. - - - - - - - Get the current audio output device identifier. - - - - - Configures an explicit audio output device. If the module paramater is NULL, - audio output will be moved to the device specified by the device identifier string immediately. - This is the recommended usage. A list of adequate potential device strings can be obtained with - . - However passing NULL is supported in LibVLC version 2.2.0 and later only; in earlier versions, this function would - have no effects when the module parameter was NULL. - If the module parameter is not NULL, the device parameter of the corresponding audio output, if it exists, will be - set to the specified string. 
- Note that some audio output modules do not have such a parameter (notably MMDevice and PulseAudio). - A list of adequate potential device strings can be obtained with . - - - - - The path of LibVlc, it is a DependencyProperty. - - - - - The options of LibVlc, it is a DependencyProperty. - - - - - The aspect ratio of video, it is a DependencyProperty. - - - - - The stretch mode of video. - - - - - The stretch direction of video. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Get or set progress of media, between 0 and 1. - - - - - Get or set current time progress of media. - - - - - Get FPS of media. - - - - - Get or set state of mute. - - - - - Get or set output channel of audio. - - - - - Get track count of audio. - - - - - Get or set track index of audio. - - - - - Get description of audio track. - - - - - Get or set rate of media. - - - - - Get or set title index of media. - - - - - Get title count of media. - - - - - Get or set chapter index of media. - - - - - Get chapter count of media. - - - - - Checks if media is seekable. - - - - - Get state of media. - - - - - Get length of media. - - - - - Get internal VlcMediaPlayer, it is best not to use this, unless you need to customize advanced features. - - - - - Get internal Vlc. - - - - - Get or set volume of media. - - - - - Get or set audio equalizer. - - - - - The image data of video, it is created on other thread, you can't use it in main thread. - - - - - VlcPlayer create mode. - - - - - Create a new instance with default instance. - - - - - Create a new instance with a new instance. - - - - - Width of video. - - - - - Height of video. - - - - - Video chroma type. - - -
-
diff --git a/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/Meta.Vlc.Wpf.dll b/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/Meta.Vlc.Wpf.dll deleted file mode 100644 index 219dfce..0000000 Binary files a/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/Meta.Vlc.Wpf.dll and /dev/null differ diff --git a/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/Meta.Vlc.Wpf.pdb b/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/Meta.Vlc.Wpf.pdb deleted file mode 100644 index b1cccb7..0000000 Binary files a/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/Meta.Vlc.Wpf.pdb and /dev/null differ diff --git a/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/Meta.Vlc.XML b/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/Meta.Vlc.XML deleted file mode 100644 index 27ac9fb..0000000 --- a/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/Meta.Vlc.XML +++ /dev/null @@ -1,3085 +0,0 @@ - - - - Meta.Vlc - - - - - A warpper for struct. - - - - - A list warpper for linklist struct. - - - - - Create a readonly list by a pointer of . - - - - - - Audio equalizer of VLC player. - - - - - Create a new default equalizer, with all frequency values zeroed. - - - - - Create a new equalizer, with initial frequency values copied from an existing preset. - - - - - - Create a new equalizer, with initial frequency values copied from an existing preset. - - - - - - 获取一个值,该值指示当前模块是否被载入 - - - - - Get the number of equalizer presets. - - - - - Get the number of distinct frequency bands for an equalizer. - - - - - Aways return . - - - - - Get or set the current pre-amplification value from an equalizer. - - - - - Get or set the amplification value for a particular equalizer frequency band. - - frequency band index. - - - - - Get the name of a particular equalizer preset. - - - - - - - Get a particular equalizer band frequency. - - - - - - - A warpper for struct. - - - - - A list warpper for linklist struct. - - - - - Create a readonly list by a pointer of . - - - - - - A base class of LibVlc exceptions. 
- - - - - Create exception with a message. - - exception message - - - - Create exception with a message and a inner exception. - - exception message - inner exception - - - - If a LibVlc function don't have , this exception will be throwed. - - - - - Create a . - - - - - If a function can't be found in LibVlc dlls, this exception will be throwed, maybe we should check the LibVlc - version what the function need. - - - - - Create a with function's infomation and LibVlc's version. - - infomation of function - version of LibVlc - - - - Create a with function's infomation, LibVlc's version and a inner - exception. - - infomation of function - version of LibVlc - inner exception - - - - Infomation of function what not found. - - - - - Versiong infomation of current LibVlc. - - - - - If a function is not available in current version LibVlc, but you call this, the exception will be throwed. - - - - - Create a with function's infomation and LibVlc's version. - - infomation of function - version of LibVlc - - - - Infomation of function what not found. - - - - - Versiong infomation of current LibVlc. - - - - - If a version string parse failed, this exception will be throwed. - - - - - Create a with parse failed version string. - - - - - - Parse failed version string. - - - - - If some exception throwed when loading LibVlc, this exception will be throwed. Maybe you should check the LibVlc - target platform and your app target platform. - - - - - Create a . - - - - - Create a with a inner exception. - - - - - If create a new Vlc instence return NULL, this exception will be throwed. Maybe you should check your Vlc options. - - - - - Create a . - - - - - Create a with some message. - - - - - Some helper method of interopping with unmanaged dlls. - - - - - Convert a pointer of string to manmaged . 
- - pointer of string - count of string, -1 mean auto check the end char - free this pointer when convert over - encoding of string - result string - - - - Pinned a to get pointer of this, you should call when all is - over. - - string you need pinned - GCHandle of , you can call to get pointer. - - - - Convert a pointer array to array. - - pointer array - length of pointer array - array - - - - Get a pointer of array. - - array - pointer of array - - - - 尝试启动一个用户接口,用于 LibVlc 实例 - - LibVlc 实例指针 - 接口名,为 NULL 则为默认 - 如果成功会返回 0 ,否则会返回 -1 - - - - 获取可用的音频过滤器 - - LibVlc 实例指针 - 可用音频过滤器列表指针,这是一个 类型的指针 - - - - 释放由 LibVlc 函数返回的指针资源,其作用类似于 C语言 中的 free() 函数 - - 指针 - - - - 获取 LibVlc 的变更集(?) - - 返回 LibVlc 的变更集,类似于 "aa9bce0bc4" - - - - 获取 LibVlc 的编译器信息 - - 返回 LibVlc 的编译器信息 - - - - 获取 LibVlc 的版本信息 - - 返回 LibVlc 的版本信息,类似于 "1.1.0-git The Luggage" - - - - 释放 的资源 - - 资源指针 - - - - 创建并初始化一个 LibVlc 实例,并提供相应的参数,这些参数和命令行提供的参数类似,会影响到 LibVlc 实例的默认配置. - 有效参数的列表取决于 LibVlc 版本,操作系统,可用 LibVlc 插件和平台.无效或不支持的参数会导致实例创建失败 - - 参数个数 - 参数列表 - 返回 LibVlc 实例指针,如果出错将返回 NULL - - - - 递减 LibVlc 实例的引用计数,如果它达到零,将会释放这个实例 - - 需要释放的 LibVlc 实例指针 - - - - 递增 LibVlc 实例的引用计数,当调用 NewInstance 初始化成功时,引用计数将初始化为1 - - LibVlc 实例指针 - - - - 设置一些元信息关于该应用程序 - - LibVlc 实例指针 - Java 风格的应用标识符,类似于 "com.acme.foobar" - 应用程序版本,类似于 "1.2.3" - 应用程序图标,类似于 "foobar" - - - - 为 LibVlc 设置一个回调,该回调将会在 LibVlc 退出时被调用,不能与 一起使用. - 而且,这个函数应该在播放一个列表或者开始一个用户接口前被调用,否则可能导致 LibVlc 在注册该回调前退出 - - LibVlc 实例指针 - 函数指针,这是一个参数为 void*,无返回值的函数指针 - 数据指针,将做为参数传递给回调函数 - - - - 设置一个用户代理字符串,当一个协议需要它的时候,LibVlc 将会提供该字符串 - - LibVlc 实例指针 - 应用程序名称,类似于 "FooBar player 1.2.3",实际上只要能标识应用程序,任何字符串都是可以的 - HTTP 用户代理,类似于 "FooBar/1.2.3 Python/2.6.0" - - - - 获取可用的视频过滤器 - - LibVlc 实例指针 - 可用视频过滤器列表指针,这是一个 类型的指针 - - - - 等待,直到一个接口导致 LibVlc 实例退出为止,在使用之前,应该使用 添加至少一个用户接口. 
- 实际上这个方法只会导致一个线程阻塞,建议使用 - - LibVlc 实例指针 - - - - 对一个 LibVlc 的模块的说明 - - - - - 获取一个可读的 LibVlc 错误信息 - - 返回一个可读的 LibVlc 错误信息,如果没有错误信息将返回 NULL - - - - 清除当前线程的 LibVlc 的错误信息 - - - - - 为一个事件通知注册一个回调 - - 事件管理器 - 事件类型 - 回调 - 由用户定义的数据 - 0代表成功,12代表出错 - - - - 为一个事件通知取消注册一个回调 - - 事件管理器 - 事件类型 - 回调 - 由用户定义的数据 - - - - 获取事件类型名称 - - 事件类型 - 返回事件类型名称 - - - - 表示一个 LibVlc 的事件回调代理 - - 事件参数 - 用户数据指针 - - - - 事件类型 - - - - - 媒体元数据改变 - - - - - 媒体的子项被添加 - - - - - 媒体时长改变 - - - - - 媒体解析状态被改变 - - - - - 媒体被释放 - - - - - 媒体状态改变 - - - - - 媒体播放器的媒体被改变 - - - - - 媒体播放器正在打开媒体 - - - - - 媒体播放器正在缓冲媒体 - - - - - 媒体播放器正在播放 - - - - - 媒体播放器被暂停 - - - - - 媒体播放器被停止播放 - - - - - 媒体播放器前进 - - - - - 媒体播放器后退 - - - - - 媒体播放器结束播放 - - - - - 媒体播放器遇到错误 - - - - - 媒体播放器时间改变 - - - - - 媒体播放器进度改变 - - - - - 媒体播放器是否允许寻址被改变 - - - - - 媒体播放器是否允许被暂停被改变 - - - - - 媒体播放器标题被改变 - - - - - 媒体播放器捕获一个快照 - - - - - 媒体播放器长度改变 - - - - - 媒体播放器视频输出改变 - - - - - 一个项被添加到媒体列表 - - - - - 一个项将被添加到媒体列表 - - - - - 一个项从媒体列表移除 - - - - - 一个项将从媒体列表移除 - - - - - 一个项被添加到媒体列表视图 - - - - - 一个项将被添加到媒体列表视图 - - - - - 一个项从媒体列表视图移除 - - - - - 一个项将从媒体列表视图移除 - - - - - 媒体列表播放器开始播放 - - - - - 媒体列表播放器跳到下个项 - - - - - 媒体列表播放器停止 - - - - - 媒体搜寻器开始搜寻 - - - - - 媒体搜寻器搜寻结束 - - - - - 一个 VLM 媒体被添加 - - - - - 一个 VLM 媒体被移除 - - - - - 一个 VLM 媒体被改变 - - - - - 一个 VLM 媒体实例开始 - - - - - 一个 VLM 媒体实例停止 - - - - - 一个 VLM 媒体实例被初始化 - - - - - 一个 VLM 媒体实例正在打开 - - - - - 一个 VLM 媒体实例正在播放 - - - - - 一个 VLM 媒体实例被暂停 - - - - - 一个 VLM 媒体实例结束播放 - - - - - 一个 VLM 媒体实例出现错误 - - - - - 向一个媒体添加一个选项,这个选项将会确定媒体播放器将如何读取介质, - - 一个媒体指针 - - - - - 向一个媒体通过可配置的标志添加一个选项,这个选项将会确定媒体播放器将如何读取介质, - - 一个媒体指针 - - - - - - 复制一个媒体对象 - - 要被复制的媒体对象 - 复制的媒体对象 - - - - 获取媒体对象的事件管理器,该函数不会增加引用计数 - - 媒体对象指针 - 返回媒体对象的事件管理器 - - - - 获取媒体的基本编码器的说明 - - 得来 - 得来 - 返回媒体的基本编码器的说明 - - - - 获取媒体的时间长度 - - 媒体对象指针 - 返回媒体的时间长度 - - - - 获取媒体的某个元属性,如果尚未解析元属性,将会返回 NULL. - 这个方法会自动调用 方法,所以你在之后应该会收到一个 MediaMetaChanged 事件. 
- 如果你喜欢同步版本,可以在 GetMeta 之前调用 方法 - - 媒体对象指针 - 元属性类型 - 返回媒体的某个元属性 - - - - 获取该媒体的媒体资源地址 - - 媒体对象指针 - 返回该媒体的媒体资源地址 - - - - 获取媒体当前状态 - - 媒体对象指针 - 返回媒体当前状态 - - - - 获取媒体当前统计 - - 媒体对象指针 - 统计结构体指针,指向 - 如果成功会返回 true ,否则会返回 false - - - - 获取媒体的基本流的描述,注意,在调用该方法之前你需要首先调用 方法,或者至少播放一次. - 否则,你将的得到一个空数组 - - 媒体对象指针 - 一个 数组 - 数组的元素个数 - - - - 获取由用户定义的媒体数据 - - 媒体对象指针 - 返回由用户定义的媒体数据指针 - - - - 获取一个值表示该媒体是否已经解析 - - LibVlc 实例指针 - True 表示已经解析,False 表示尚未被解析 - - - - 创建一个具有名字的媒体作为一个空节点 - - LibVlc 实例指针 - 名字 - 创建的媒体对象指针 - - - - 通过给定的文件描述符创建一个媒体,该文件描述符必须具有 Read 访问权限. - LibVlc 不会关闭任何文件描述符,尽管如此,一般一个媒体描述符只能在媒体播放器中使用一次,如果你想复用,需要使用 lseek 函数将文件描述符的文件指针倒回开头 - - LibVlc 实例指针 - 文件描述符 - 创建的媒体对象指针,发送错误时会返回 NULL - - - - 通过给定的文件 Url 创建一个媒体,该 Url 的格式必须以 "file://" 开头,参见 "RFC3986". - 对于打开本地媒体,其实我们更推荐使用 - - LibVlc 实例指针 - 媒体的文件 Url - 创建的媒体对象指针,发送错误时会返回 NULL - - - - 通过给定的文件路径创建一个媒体 - - LibVlc 实例指针 - 媒体文件路径 - 创建的媒体对象指针,发送错误时会返回 NULL - - - - 解析一个媒体,获取媒体的元数据和轨道信息 - - 媒体对象指针 - - - - 异步解析一个媒体,获取媒体的元数据和轨道信息,这是 的异步版本, - 解析完成会触发 MediaParsedChanged 事件,您可以跟踪该事件 - - 媒体对象指针 - - - - 根据提供的标志异步解析一个媒体,获取媒体的元数据和轨道信息,这是 的高级版本, - 默认情况下解析一个本地文件,解析完成会触发 MediaParsedChanged 事件,您可以跟踪该事件 - - 媒体对象指针 - 提供的解析标志 - 成功解析会返回 0,否则会返回 -1 - - - - 递减媒体对象的引用计数,如果它达到零,将会释放这个实例 - - 媒体对象指针 - - - - 递增媒体对象的引用计数 - - 媒体对象指针 - - - - 保存当前的元数据到媒体 - - 媒体对象指针 - 如果操作成功将会返回 True - - - - 设置媒体的元数据 - - 媒体对象指针 - 元数据类型 - 元数据值 - - - - 设置媒体的由用户定义的数据 - - 媒体对象指针 - 用户定义的数据 - - - - 获取媒体对象的子对象列表,这将增加引用计数,使用 来减少引用计数 - - 媒体对象指针 - 返回媒体对象的子对象列表 - - - - 获取媒体的基本流的描述,注意,在调用该方法之前你需要首先调用 方法,或者至少播放一次. 
- 否则,你将的得到一个空数组 - - 媒体对象指针 - 一个 数组的数组 - 返回媒体的基本流的描述 - - - - 释放一个媒体的基本流的描述的数组 - - 基本流的描述的数组 - - - - 表示音频的通道数或者视频的帧高 - - - - - 表示音频的速率或者视频的帧宽 - - - - - 表示音频的通道数 - - - - - 表示音频的速率 - - - - - 表示视频的帧高 - - - - - 表示视频的帧宽 - - - - - 表示一个 Track 的具体指针,该指针可能指向 , 或者 ,根据 - Type 的值不同,Track 的指向数据也可能不同 - - - - - 切换音频静音状态 - - - - - - 获取音频静音状态 - - - 0为正常,1为静音,-1为未定义 - - - - 设置音频静音状态 - - - - - - - 获取音频音量 - - - 0~100之间 - - - - 设置音频音量 - - - 0~100之间 - - - - 获取音频输出通道 - - - - - - - 设置音频输出通道 - - - - - - - - 获取音频轨道数 - - - - - - - 获取当前音轨 - - - - - - - 设置当前音轨 - - - - - - - - 获取音轨描述 - - - - - - - Description for audio output device. - - - - - Next entry in list. - - - - - Device identifier string. - - - - - User-friendly device description. - - - - - Description for audio output. - - - - - Gets a list of potential audio output devices. - - media player - - A NULL-terminated linked list of potential audio output devices. It must be freed with - - - - - - Frees a list of available audio output devices. - - list with audio outputs for release - - - - Gets a list of audio output devices for a given audio output module. - - libvlc instance - audio output name (as returned by ) - - A NULL-terminated linked list of potential audio output devices. It must be freed with - - - - - - Gets the list of available audio output modules. - - libvlc instance - list of available audio outputs. It must be freed with - - - - Frees the list of available audio output modules. - - list with audio outputs for release - - - - Selects an audio output module. - Any change will take be effect only after playback is stopped and restarted. Audio output cannot be changed while - playing. - - media player - name of audio output, use - 0 if function succeded, -1 on error - - - - Configures an explicit audio output device. If the module paramater is NULL, - audio output will be moved to the device specified by the device identifier string immediately. - This is the recommended usage. 
A list of adequate potential device strings can be obtained with - . - However passing NULL is supported in LibVLC version 2.2.0 and later only; in earlier versions, this function would - have no effects when the module parameter was NULL. - If the module parameter is not NULL, the device parameter of the corresponding audio output, if it exists, will be - set to the specified string. - Note that some audio output modules do not have such a parameter (notably MMDevice and PulseAudio). - A list of adequate potential device strings can be obtained with . - - media player - - If NULL, current audio output module. if non-NULL, name of audio output module ( - ) - - device identifier string - Nothing. Errors are ignored (this is a design issue). - - - - Get the current audio output device identifier. - - media player - - the current audio output device identifier NULL if no device is selected or in case of error (the result must - be released with ). - - - - - 获取预设均衡器数量 - - - - - - - 获取预设均衡器名称 - - 均衡器编号 - - - - - 获取均衡器频带数目 - - - - - - 获取均衡器频带的频率 - - 频带编号 - - - - - 创建一个新的均衡器 - - - - - - 从预设创建一个新的均衡器 - - 预设均衡器编号 - - - - - 释放均衡器 - - - - - - 设置均衡器的新预设放大值 - - - 取值范围为 -20.0~+20.0 - 0 成功,-1 失败 - - - - 获取均衡器的新预设放大值 - - - - - - 设置均衡器的放大值 - - 均衡器 - 取值范围为 -20.0~+20.0 - 屏带编号 - 0 成功,-1 失败 - - - - 获取均衡器的放大值 - - 均衡器 - 屏带编号 - - - - 为播放器设置均衡器,提供 NULL 来关闭均衡器,在该方法返回后即可立即释放均衡器,播放器不会引用均衡器实例 - - 播放器 - 均衡器 - - - - - 表示一个视频,音频,或者文本的描述 - - - - - 视频字幕设定项 - - - - - 当锁定图像缓存时,调用此回调. - 每当一个新帧需要被解码,都会调用此回调,一个或者三个像素平面会被通过第二个参数返回.这些像素屏幕需要 32 字节对齐 - - 一个私有指针 - 像素平面 - 一个私有指针用来显示或解锁回调用来识别图像缓存 - - - - 当解锁图像缓存时,调用此回调. - 每当一个帧被解码完成,都会调用此回调,该回调并不是必须的,但是它是读取像素值的唯一的途径. - 该回调会发生在图片解码之后,显示之前 - - 一个私有指针 - 返回的指针 - 像素平面 - - - - 当显示图像时,调用此回调. - 每当一个帧需要被显示时,都会调用此回调 - - 一个私有指针 - 返回的指针 - - - - 当配置图像缓存格式时,调用此回调. 
- 此回调会获取由解码器和过滤器(如果有)输出的视频的格式, - - 一个私有指针 - 视频格式识别码 - 像素宽 - 像素高 - 每个像素平面字节的扫描线间距 - 每个像素平面的扫描线的个数 - 分配的图片缓存大小,0代表失败 - - - - 配置图片缓存格式时,调用此回调 - - 一个私有指针 - - - - 音频播放时,调用此回调 - - 一个私有指针 - 采样数据 - 采样数 - 预计播放时间戳 - - - - 音频暂停时,调用此回调 - - 一个私有指针 - 请求暂停的时间戳 - - - - 音频继续播放时,调用此回调 - - 一个私有指针 - 请求继续的时间戳 - - - - 音频缓冲刷新时,调用此回调 - - 一个私有指针 - - - - - - - - - - - 音频格式完成配置时调用此回调 - - 一个私有指针 - 格式字符串,一个四字符的字符串 - 采样率 - 通道数 - 0代表成功 - - - - - - - - - 音频设置音量时,调用此回调 - - 一个私有指针 - 音量 - 是否为静音 - - - - 创建一个空的媒体播放器对象 - - LibVlc 实例指针 - 创建好的媒体播放器对象指针 - - - - 通过一个媒体对象创建一个媒体播放器对象 - - 媒体对象指针 - 创建好的媒体播放器对象指针 - - - - 递减媒体播放器对象的引用计数,如果它达到零,将会释放这个实例 - - 媒体播放器对象 - - - - 递增媒体播放器对象的引用计数 - - 媒体播放器对象 - - - - 为媒体播放器设置媒体 - - 媒体播放器对象 - 媒体对象 - - - - 获取媒体播放器的媒体 - - 媒体播放器对象 - 媒体对象 - - - - 获取媒体播放器对象的事件管理器,该函数不会增加引用计数 - - 媒体播放器对象 - 返回媒体播放器对象的事件管理器 - - - - 获取媒体播放器对象是否正在播放 - - 媒体播放器对象 - 如果播放器对象正在播放则返回 True ,否则返回 Flase - - - - 使媒体播放器开始播放 - - 媒体播放器对象 - 0代表成功,-1代表失败 - - - - 设置媒体播放器播放或者暂停,如果没有设置媒体对象,将会没有作用 - - 媒体播放器对象 - true 代表暂停,false 代表播放或继续 - - - - 设置媒体播放器的进度,如果后台播放未启用将会没有作用,根据底层的输入格式和协议,可能导致无法正常播放 - - 媒体播放器对象 - 播放进度,取值为0.0~1.0 - - - - 停止媒体播放器的播放,如果没有设置媒体将会没有作用 - - 媒体播放器对象 - - - - 设置 Video 的事件回调 - - 媒体播放器对象 - Lock 事件回调,必须 - Unlock 事件回调 - Display 事件回调 - 回调用用户数据 - - - - 设置 Video 解码格式 - - 媒体播放器对象 - 视频格式识别码,一个四个字符的识别码 - 像素宽 - 像素高 - 扫描线 - - - - 设置 Video 解码格式回调 - - 媒体播放器对象 - - - - - - 为媒体播放器设置一个视频输出句柄,将会在该句柄上绘图 - - 媒体播放器对象 - 句柄 - - - - 获取为媒体播放器设置的视频输出句柄 - - 媒体播放器对象 - 句柄 - - - - 设置 Audio 的事件回调 - - 媒体播放器对象 - - - - - - - - - 设置 Audio 的格式 - - 媒体播放器对象 - 格式字符串,一个四字符的字符串 - 采样率 - 通道数 - - - - 设置 Audio 的格式回调 - - 媒体播放器对象 - - - - - - - 媒体播放器对象 - - - - - 获取媒体的长度,以毫秒为单位 - - 媒体播放器对象 - -1为未设置媒体 - - - - 获取目前的媒体进度,以毫秒为单位 - - 媒体播放器对象 - -1为未设置媒体 - - - - 设置目前的媒体进度,以毫秒为单位 - - 媒体播放器对象 - 播放进度 - - - - 获取当前媒体进度,0~1范围 - - 媒体播放器对象 - - - - 设置当前媒体播放器的章节 - - 媒体播放器对象 - 章节 - - - - 获取当前媒体播放器的章节 - - 媒体播放器对象 - -1代表没有设置媒体 - - - - 获取当前媒体播放器的章节数 - - 媒体播放器对象 - -1代表没有设置媒体 - - - - 获取当前媒体播放器是否处于可播放 - - 媒体播放器对象 - - - - - 获取标题的章节数 - - 媒体播放器对象 - 标题 - -1代表没有设置媒体 - - - - 
设置媒体播放器的标题 - - 媒体播放器对象 - - - - - 获取媒体播放器的标题 - - 媒体播放器对象 - - - - - 获取媒体播放器的标题数 - - 媒体播放器对象 - - - - - 上一个章节 - - 媒体播放器对象 - - - - 下一个章节 - - 媒体播放器对象 - - - - 获取媒体速率 - - 媒体播放器对象 - - - - 设置媒体是速率 - - 媒体播放器对象 - - - - - 获取媒体的状态 - - 媒体播放器对象 - - - - - 获取媒体的FPS - - 媒体播放器对象 - - - - - 获取该媒体播放器视频输出的个数 - - 媒体播放器对象 - - - - - 获取该媒体是否能够跳进度 - - 媒体播放器对象 - - - - - 获取该媒体是否能够暂停 - - 媒体播放器对象 - - - - - 播放下一帧 - - 媒体播放器对象 - - - - 导航DVD菜单 - - 媒体播放器对象 - - - - - 设置播放器播放视频时显示视频标题 - - 媒体播放器对象 - - - - - - 释放 TrackDescriptionList 资源 - - - - - - Get the mouse pointer coordinates over a video. - Coordinates are expressed in terms of the decoded video resolution, not in terms of pixels on the screen/viewport - (to get the latter, you can query your windowing system directly). - Either of the coordinates may be negative or larger than the corresponding dimension of the video, if the cursor is - outside the rendering area. - - media player - number of the video (starting from, and most commonly 0) - pointer to get the abscissa [OUT] - pointer to get the ordinate [OUT] - 0 on success, -1 if the specified video does not exist - - - - Set the mouse pointer coordinates over a video. - This is a special function of xZune dev version. If you display using HWND, you will needn't this function. - - media player - number of the video (starting from, and most commonly 0) - pointer to get the abscissa [OUT] - pointer to get the ordinate [OUT] - 0 on success, -1 if the specified video does not exist - - - - Set the a mouse button is down. - This is a special function of xZune dev version. If you display using HWND, you will needn't this function. - - media player - number of the video (starting from, and most commonly 0) - a enum of mouse button - 0 on success, -1 if the specified video does not exist - - - - Set the a mouse button is up. - This is a special function of xZune dev version. If you display using HWND, you will needn't this function. 
- - media player - number of the video (starting from, and most commonly 0) - a enum of mouse button - 0 on success, -1 if the specified video does not exist - - - - Get the pixel dimensions of a video. - - media player - number of the video (starting from, and most commonly 0) - pointer to get the pixel width [OUT] - pointer to get the pixel height [OUT] - 0 on success, -1 if the specified video does not exist - - - - Get the current video scaling factor. - - media player - - the currently configured zoom factor, or 0. if the video is set to fit to the output window/drawable - automatically. - - - - - Set the video scaling factor. - That is the ratio of the number of pixels on screen to the number of pixels in the original decoded video in each - dimension. - Zero is a special value; it will adjust the video to the output window/drawable (in windowed mode) or the entire - screen. - - media player - the scaling factor, or zero - - - - Get current video aspect ratio. - - media player - the video aspect ratio or NULL if unspecified (the result must be released with ). - - - - Set new video aspect ratio. - - media player - new video aspect-ratio or NULL to reset to default - - - - Get current video width. Use instead. - - media player - the video pixel width or 0 if not applicable - - - - Get current video height. Use instead. - - media player - the video pixel height or 0 if not applicable - - - - Get number of available video tracks. - - media player - the number of available video tracks (int) - - - - Get current video track. - - media player - the video track ID(int) or -1 if no active input - - - - Set video track. - - media player - the track ID (i_id field from track description) - 0 on success, -1 if out of range - - - - Get the description of available video tracks. - - media player - - list with description of available video tracks, or NULL on error. It must be freed with - - - - - - Get integer adjust option. 
- - media player - adjust option to get, values of - - - - - Get float adjust option. - - media player - adjust option to get, values of - - - - - Set adjust option as integer. Options that take a different type value are ignored. Passing libvlc_adjust_enable as option value has the side effect of starting (arg !0) or stopping (arg 0) the adjust filter. - - media player - adjust option to set, values of - adjust option value - - - - Set adjust option as float. Options that take a different type value are ignored. - - media player - adjust option to set, values of - adjust option value - - - - A enum of mouse button. - - - - - The left button of mouse. - - - - - The right button of mouse. - - - - - Other buttons of mouse, it is not commonly used. - - - - - 获取由 LibVlc 定义的当前时间 - - 返回由 LibVlc 定义的当前时间 - - - - 获取与提供的时间戳之间的延迟 - - 时间戳 - 返回与提供的时间戳之间的延迟 - - - - 释放给定的 LibVlc 的实例相关的 VLM 实例 - - - - - - 添加广播和一个输入 - - VLM 实例指针 - 广播名 - 输入媒体资源地址 - 输出媒体资源地址 - - - 设置一个值允许打开新的新的广播 - 是否广播循环播放 - - - - - 添加视频点播和一个输入 - - VLM 实例指针 - - - - - - - - - - - 删除媒体(视频点播或广播) - - VLM 实例指针 - - - - - - 启用或禁用媒体(视频点播或广播) - - - - - - - - - 设置媒体输出 - - VLM 实例指针 - - - - - - - 设置媒体的 MRL 输入 - - - - - - - - - 增加一个媒体的 MRL 输入 - - - - - - - - - 设置媒体循环状态 - - - - 媒体新的状态 - - - - - 设置媒体的 Vod Muxer - - - - - - - - - 编辑媒体参数 - - - - - - - - - - - - - - 播放指定媒体 - - - 指定的媒体的名字 - - - - - 停止指定的媒体 - - - 指定的媒体的名字 - - - - - 暂停指定的媒体 - - - 指定的媒体的名字 - - - - - 在指定的广播中寻找 - - - 指定的媒体的名字 - 寻找进度的百分比数值 - - - - - 以 Json 字符串的形式返回一个关于媒体的信息 - - - - - - - - 通过名称或 ID 获取媒体实例的位置 - - - - - - - - - 通过名称或 ID 获取媒体实例的时间 - - - - - - - - - 通过名称或 ID 获取媒体实例的长度 - - - - - - - - - 通过名称或 ID 获取媒体实例的退率 - - - - - - - - - 从 Vim Media 中得到 Libvlc 事件管理器 - - - - - - - A dynamic mapper of LibVlc functions. - - - - - - Load a LibVlc function from unmanaged to managed. - - A custom attribute type cannot be loaded. - - For LibVlcFunction, need LibVlcFunctionAttribute to get Infomation - of function. - - Can't find function in dll. - - - - Get this is available or not. 
- - - - - Get infomation of this . - - - - - Get delegate of this , if is false, this method will throw - exception. - - This function isn't available on current version LibVlc. - - - - 为 LibVlc 函数委托初始化提供必要的信息 - - - - - 指定该委托在 LibVlc 中的函数名,不限定 LibVlc 的版本 - - 函数名 - - - - 指定该委托在 LibVlc 中的函数名,并要求不低于指定版本的 LibVlc - - 函数名 - 最低支持的 LibVlc - - - - 指定该委托在 LibVlc 中的函数名,并要求不低于指定版本的 LibVlc,也不高于指定的最大版本 - - 函数名 - 最低支持的 LibVlc - 最高支持的 LibVlc - - - - 指定该委托在 LibVlc 中的函数名,并要求不低于指定版本的 LibVlc,也不高于指定的最大版本 - - 函数名 - 最低支持的 LibVlc - 最高支持的 LibVlc - 特定支持的 LibVlc 开发版本 - - - - 获取一个值,表示函数在 LibVlc 中的名称 - - - - - 获取一个值,表示支持该函数的最小 LibVlc 版本 - - - - - 获取一个值,表示支持该函数的最大 LibVlc 版本 - - - - - 获取一个值,表示特定的开发版本 - - - - - Version infomation of LibVlc. - - - - - Create LibVlcVersion from version string, it must like "2.2.0-Meta Weatherwax". - - version string - Can't parse libvlc version string, it must like "2.2.0-Meta Weatherwax". - - At least one component of version represents a number greater than - . - - - - - Version of LibVlc. - - - - - DevString of LibVlc. - - - - - Code name of LibVlc. - - - - - Check a function is available for this version. - - - - - - - A Vlc unmanaged object. - - - - - A pointer of this Vlc object. - - - - - A relation of this object. - - - - - A Vlc unmanaged object with Vlc event system. - - - - - Vlc event manager. - - - - - LibVlc dlls manager, load LibVlc and initialize LibVlc to use. Some public method also in this class, like - method. - - - - - LibVlc loaded or not. - - - - - Handle of libvlc.dll. - - - - - Handle of libvlccore.dll. - - - - - Directory of LibVlc dlls. - - - - - Version infomation of LibVlc. - - - - - Load LibVlc dlls, and mapping all function. - - directory of LibVlc - - Can't load LibVlc dlls, check the platform and LibVlc target platform - (should be same, x86 or x64). - - A custom attribute type cannot be loaded. - - For LibVlcFunction, need LibVlcFunctionAttribute to get Infomation - of function. - - Can't find function in dll. 
- Can't parse libvlc version string, it must like "2.2.0-Meta Weatherwax". - - At least one component of version represents a number greater than - . - - - - - Get version string of LibVlc. - - - - - - Get compiler infomation of LibVlc. - - - - - - Get changeset of LibVlc. - - - - - - Frees an heap allocation returned by a LibVLC function, like ANSI C free() method. - - the pointer of object to be released - A delegate callback throws an exception. - - - - Release a list of module descriptions. - - the list to be released - A delegate callback throws an exception. - - - - Frees the list of available audio output modules. - - a pointer of first . - A delegate callback throws an exception. - - - - Frees a list of available audio output devices. - - a pointer of first . - A delegate callback throws an exception. - - - - Release (free) pointer of . - - - A delegate callback throws an exception. - - - - Release media descriptor's elementary streams description array. - - pointer tracks info array to release - number of elements in the array - A delegate callback throws an exception. - - - - A warpper for struct. - - - - - Create a media track from a pointer, it will distinguish type of media track auto. - - pointer of media track - a audio track, video track, subtitle track or unknow track - - - - A warpper for struct. - - - - - A warpper for struct. - - - - - A warpper for struct. - - - - - A warpper for orther media track. - - - - - A list warpper for struct. - - - - - Create a list of media track from a pointer of array. - - pointer of media track array - count of media track array - - - - A warpper for struct. - - - - - A list warpper for linklist struct. - - - - - Create a readonly list by a pointer of . - - - - - - Preset audio equlizer type. - - - - - A struct with width and height, for downward compatibility. - - - - - A warpper for struct. - - - - - A list warpper for linklist struct. - - - - - Create a readonly list by a pointer of . 
- - - - - - 创建并初始化一个 LibVlc 实例,并提供相应的参数,这些参数和命令行提供的参数类似,会影响到 LibVlc 实例的默认配置. - 有效参数的列表取决于 LibVlc 版本,操作系统,可用 LibVlc 插件和平台.无效或不支持的参数会导致实例创建失败 - - - - - 递减 LibVlc 实例的引用计数,如果它达到零,将会释放这个实例 - - - - - 递增 LibVlc 实例的引用计数,当调用 NewInstance 初始化成功时,引用计数将初始化为1 - - - - - 尝试启动一个用户接口,用于 LibVlc 实例 - - - - - 为 LibVlc 设置一个回调,该回调将会在 LibVlc 退出时被调用,不能与 一起使用. - 而且,这个函数应该在播放一个列表或者开始一个用户接口前被调用,否则可能导致 LibVlc 在注册该回调前退出 - - - - - 等待,直到一个接口导致 LibVlc 实例退出为止,在使用之前,应该使用 添加至少一个用户接口. - 实际上这个方法只会导致一个线程阻塞,建议使用 - - - - - 设置一个用户代理字符串,当一个协议需要它的时候,LibVlc 将会提供该字符串 - - - - - 设置一些元信息关于该应用程序 - - - - - 获取可用的音频过滤器 - - - - - 获取可用的视频过滤器 - - - - - 使用默认的参数初始化一个 Vlc 实例 - - Can't create a Vlc instence, check your Vlc options. - A delegate callback throws an exception. - - - - 提供指定的参数初始化一个 Vlc 实例 - - - Can't create a Vlc instence, check your Vlc options. - A delegate callback throws an exception. - - - - 释放当前 Vlc 资源 - - - - - 获取一个值,该值指示当前模块是否被载入 - - - - - 获取 Vlc 实例的指针 - - - - A custom attribute type cannot be loaded. - - For LibVlcFunction, need LibVlcFunctionAttribute to get Infomation - of function. - - Can't find function in dll. - - - - 递增引用计数,在使用 Meta.Vlc 时,一般是不需要调用此方法,引用计数是由 Vlc 类托管的 - - - - - 尝试添加一个用户接口 - - 接口名,为 NULL 则为默认 - 是否成功添加接口 - - - - 等待,直到一个接口导致实例退出为止,在使用之前,应该使用 添加至少一个用户接口. - 实际上这个方法只会导致线程阻塞 - - - - - 设置一个用户代理字符串,当一个协议需要它的时候,将会提供该字符串 - - 应用程序名称,类似于 "FooBar player 1.2.3",实际上只要能标识应用程序,任何字符串都是可以的 - HTTP 用户代理,类似于 "FooBar/1.2.3 Python/2.6.0" - - - - 设置一些元信息关于该应用程序 - - Java 风格的应用标识符,类似于 "com.acme.foobar" - 应用程序版本,类似于 "1.2.3" - 应用程序图标,类似于 "foobar" - - - - 获取可用的音频过滤器 - - - - - 获取可用的视频过滤器 - - - - - 通过名称创建一个新的 VlcMedia - - 媒体名称 - - - - 通过给定的文件描述符创建一个新的 VlcMedia - - 文件描述符 - - - - 通过给定的文件 Url 创建一个新的 VlcMedia,该 Url 的格式必须以 "file://" 开头,参见 "RFC3986". - - 文件 Url - - - - 通过给定的文件路径创建一个新的 VlcMedia - - 文件路径 - - - - LibVlc error module loaded or not. - - - - - Get a readable error message. - - return a readable LibVlc error message, if there are no error will return - A delegate callback throws an exception. 
- - - - Clear error message of current thread. - - A delegate callback throws an exception. - - - - A manager of LibVlc event system. - - - - - Create a event manager with parent Vlc object and pointer of event manager. - - - - - - - LibVlc event module loaded or not. - - - - - Pointer of this event manager. - - - - - A relation of this object. - - - - - Release this event manager. - - A delegate callback throws an exception. - - - - Attach a event with a callback. - - event type - callback which will be called when event case - some custom data - - - - Deattach a event with a callback. - - event type - callback which will be called when event case - some custom data - - - - Get event type name. - - - - - - - The API warpper of LibVlc media. - - - - - 获取一个值,该值指示当前模块是否被载入 - - - - - 获取媒体的时间长度 - - - - - 获取该媒体的媒体资源地址 - - - - - 获取媒体当前状态 - - - - - 获取媒体当前统计 - - - - - 获取或设置由用户定义的媒体数据 - - - - - 获取一个值表示该媒体是否已经解析 - - - - - 获取 Media 实例指针 - - - - - 释放 VlcMedia 资源 - - - - - 载入 LibVlc 的 Media 模块,该方法会在 中自动被调用 - - - - - - - - 通过名称创建一个新的 VlcMedia - - Vlc 对象 - 媒体名称 - - - - 通过给定的文件描述符创建一个新的 VlcMedia - - Vlc 对象 - 文件描述符 - - - - 通过给定的文件 Url 创建一个新的 VlcMedia,该 Url 的格式必须以 "file://" 开头,参见 "RFC3986". - - Vlc 对象 - 文件 Url - - - - 通过给定的文件路径创建一个新的 VlcMedia - - Vlc 对象 - 文件路径 - - - - 向一个媒体添加选项,这个选项将会确定媒体播放器将如何读取介质, - - - - - - 向一个媒体通过可配置的标志添加一个选项,这个选项将会确定媒体播放器将如何读取介质, - - - - - - - 复制一个媒体对象 - - 复制的媒体对象 - - - - 获取媒体的基本编码器的说明 - - 得来 - 得来 - 返回媒体的基本编码器的说明 - - - - 获取媒体的某个元属性,如果尚未解析元属性,将会返回 NULL. - 这个方法会自动调用 方法,所以你在之后应该会收到一个 MediaMetaChanged 事件. - 如果你喜欢同步版本,可以在 GetMeta 之前调用 方法 - - 元属性类型 - 返回媒体的某个元属性 - - - - 获取媒体的基本流的描述,注意,在调用该方法之前你需要首先调用 方法,或者至少播放一次. - 否则,你将的得到一个空数组 - - 一个 数组 - - - - 解析一个媒体,获取媒体的元数据和轨道信息 - - - - - 异步解析一个媒体,获取媒体的元数据和轨道信息,这是 的异步版本, - 解析完成会触发 事件,您可以跟踪该事件 - - - - - 根据提供的标志异步解析一个媒体,获取媒体的元数据和轨道信息,这是 的高级版本, - 默认情况下解析一个本地文件,解析完成会触发 事件,您可以跟踪该事件 - - - - - 递增媒体对象的引用计数 - - - - - 保存当前的元数据到媒体 - - 如果操作成功将会返回 True - - - - 设置媒体的元数据 - - 元数据类型 - 元数据值 - - - - 获取媒体的基本流的描述,注意,在调用该方法之前你需要首先调用 方法,或者至少播放一次. 
- 否则,你将的得到一个空数组 - - - - - The lowest layer API warpper of LibVlc media player. - - - - - 获取一个值,该值指示当前模块是否被载入 - - - - - 载入 LibVlc 的 MediaPlayer 模块,该方法会在 中自动被调用 - - - - - - - - 获取一个值,该值表示 是否正在播放 - - - - - 获取或设置一个值,该值表示 的播放进度,范围为0.0~1.0 - - - - - 获取或设置一个值,该值表示 通过GDI的方式,将视频渲染到指定的窗口句柄 - - - - - 获取一个值,该值表示 目前媒体的长度 - - - - - 获取或设置一个值,该值表示当前媒体播放进度 - - - - - 获取或设置一个值,该值表示当前 播放的章节 - - - - - 获取一个值,该值表示媒体共有多少个章节 - - - - - 获取一个值,该值表示现在媒体是否可以进行播放 - - - - - 获取或设置一个值,该值表示 当前播放的标题 - - - - - 获取或设置一个值,该值表示当前媒体的播放速率 - - - - - 获取一个值,该值示当前媒体状态 - - - - - 获取一个值,该值表示当前媒体的FPS - - - - - 获取一个值,该值表示当前拥有的视频输出数量 - - - - - 获取一个值,该值表示当前媒体是否允许跳进度 - - - - - 获取一个值,该值表示当前媒体是否允许暂停 - - - - - 获取或设置一个值,该值表示当前媒体音频的音量 - - - - - 获取或设置一个值,该值表示当前媒体是否静音 - - - - - 获取或设置一个值,该值表示音频输出通道 - - - - - 使 开始播放 - - - - - 设置 播放或者暂停 - - true 代表暂停,false 代表播放或继续 - - - - 设置 为暂停 - - - - - 设置 为播放 - - - - - 当播放时设置 为暂停,反之为播放 - - - - - 设置 为停止 - - - - - 设置 Audio 的格式 - - 格式字符串,一个四字符的字符串 - 采样率 - 通道数 - - - - 播放上一个章节 - - - - - 播放下一个章节 - - - - - 播放下一帧 - - - - - 设置标题显示位置 - - 位置 - 显示时间 - - - - 切换静音状态 - - - - - 获取鼠标坐标 - - 视频输出号 - - - - - - Apply new equalizer settings to a media player. - - The media player does not keep a reference to the supplied equalizer so you should set it again when you changed - some value of equalizer. - - After you set equalizer you can dispose it. if you want to disable equalizer set it to . - - - - - - Gets a list of potential audio output devices. - - - - - - Gets a list of audio output devices for a given audio output module. - - - - - - - Gets the list of available audio output modules. - - - - - - Selects an audio output module. - Any change will take be effect only after playback is stopped and restarted. Audio output cannot be changed while - playing. - - - - - - - Get the current audio output device identifier. - - - - - Configures an explicit audio output device. If the module paramater is NULL, - audio output will be moved to the device specified by the device identifier string immediately. 
- This is the recommended usage. A list of adequate potential device strings can be obtained with - . - However passing NULL is supported in LibVLC version 2.2.0 and later only; in earlier versions, this function would - have no effects when the module parameter was NULL. - If the module parameter is not NULL, the device parameter of the corresponding audio output, if it exists, will be - set to the specified string. - Note that some audio output modules do not have such a parameter (notably MMDevice and PulseAudio). - A list of adequate potential device strings can be obtained with . - - - - - 释放 VlcMedia 资源 - - - - - Some method of Win32 APIs. - - - - - 进程调用 LoadLibrary 以显式链接到 DLL,如果函数执行成功,它会将指定的 DLL 映射到调用进程的地址空间中并返回该 DLL 的句柄,此句柄可以与其他函数(如 GetProcAddress 和 - FreeLibrary)一起在显式链接中使用 - LoadLibrary 将尝试使用用于隐式链接的相同搜索序列来查找 DLL.如果系统无法找到所需的 DLL 或者入口点函数返回 FALSE.则 LoadLibrary 将抛出异常.如果对 LoadLibrary 的调用所指定的 - DLL 模块已映射到调用进程的地址空间中,则该函数将返回该 DLL 的句柄并递增模块的引用数 - - DLL 模块地址 - 返回 DLL 模块句柄,如果出错将抛出异常 - - - - 显式链接到 DLL 的进程调用 GetProcAddress 来获取 DLL 导出函数的地址,由于是通过指针调用 DLL 函数并且没有编译时类型检查,需确保函数的参数是正确的,以便不会超出在堆栈上分配的内存和不会导致访问冲突 - - DLL 模块句柄 - 调用的函数名 - 返回函数地址 - - - - 不再需要 DLL 模块时,显式链接到 DLL 的进程调用 FreeLibrary 函数.此函数递减模块的引用数,如果引用数为零,此函数便从进程的地址空间中取消模块的映射 - - DLL 模块句柄 - 如果成功会返回 true ,否则会返回 false,请通过 GetLastError 获取更多信息 - - - - 创建一个新的文件映射内核对象 - - 指定欲在其中创建映射的一个文件句柄,为0xFFFFFFFF则表示创建一个内存文件映射 - 它指明返回的句柄是否可以被子进程所继承,使用 NULL 表示使用默认安全设置 - 指定文件映射对象的页面保护 - 表示映射文件大小的高32位 - 表示映射文件大小的低32位 - 指定文件映射对象的名字,如果为 NULL 则会创建一个无名称的文件映射对象 - 返回文件映射对象指针,如果错误将返回 NULL,请通过 GetLastError 获取更多信息 - - - - 将一个文件映射对象映射到当前应用程序的地址空间 - - 文件映射对象的句柄 - 映射对象的文件数据的访问方式,而且同样要与 CreateFileMapping 函数所设置的保护属性相匹配 - 表示文件映射起始偏移的高32位 - 表示文件映射起始偏移的低32位 - 指定映射文件的字节数 - 返回文件映射在内存中的起始地址,如果错误将返回 NULL,请通过 GetLastError 获取更多信息 - - - - 关闭一个内核对象.其中包括文件,文件映射,进程,线程,安全和同步对象等 - - 欲关闭的一个对象的句柄 - 如果成功会返回 true ,否则会返回 false,请通过 GetLastError 获取更多信息 - - - diff --git a/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/Meta.Vlc.dll 
b/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/Meta.Vlc.dll deleted file mode 100644 index 8497328..0000000 Binary files a/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/Meta.Vlc.dll and /dev/null differ diff --git a/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/Meta.Vlc.pdb b/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/Meta.Vlc.pdb deleted file mode 100644 index 1605756..0000000 Binary files a/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/Meta.Vlc.pdb and /dev/null differ diff --git a/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/Plugin.cs b/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/Plugin.cs index be449d6..d8e0630 100644 --- a/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/Plugin.cs +++ b/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/Plugin.cs @@ -18,11 +18,10 @@ using System; using System.IO; using System.Linq; -using Meta.Vlc; -using Meta.Vlc.Interop.Media; -using Meta.Vlc.Wpf; -using Size = System.Windows.Size; -using VideoTrack = Meta.Vlc.VideoTrack; +using System.Reflection; +using System.Windows; +using QuickLook.Plugin.VideoViewer.FFmpeg; +using Unosquare.FFME; namespace QuickLook.Plugin.VideoViewer { @@ -38,21 +37,32 @@ namespace QuickLook.Plugin.VideoViewer ".flac", ".gsm", ".iklax", ".ivs", ".m4a", ".m4b", ".m4p", ".mmf", ".mp3", ".mpc", ".msv", ".ogg", ".oga", ".mogg", ".opus", ".ra", ".rm", ".raw", ".tta", ".vox", ".wav", ".wma", ".wv", ".webm" }; - private ContextObject _context; + private ContextObject _context; private ViewerPanel _vp; + private FFprobe probe; + public int Priority => 0 - 10; // make it lower than TextViewer public bool AllowsTransparency => true; public void Init() { - ApiManager.Initialize(VlcSettings.LibVlcPath, VlcSettings.VlcOptions); + MediaElement.FFmpegDirectory = Path.Combine( + Path.GetDirectoryName(Assembly.GetExecutingAssembly().Location), "FFmpeg\\", + App.Is64Bit ? 
"x64\\" : "x86\\"); } public bool CanHandle(string path) { - return !Directory.Exists(path) && Formats.Contains(Path.GetExtension(path).ToLower()); + if (Directory.Exists(path)) + return false; + + return Formats.Contains(Path.GetExtension(path).ToLower()); + + //FFprobe is much slower than fixed extensions + //probe = new FFprobe(path); + //return probe.CanDecode() & (probe.HasAudio() | probe.HasVideo()); } public void Prepare(string path, ContextObject context) @@ -60,16 +70,9 @@ namespace QuickLook.Plugin.VideoViewer _context = context; var def = new Size(500, 300); + probe = probe ?? new FFprobe(path); - var hasVideo = HasVideoStream(path, out Size mediaSize); - - var windowSize = mediaSize == Size.Empty ? def : mediaSize; - windowSize.Width = Math.Max(def.Width, windowSize.Width); - windowSize.Height = Math.Max(def.Height, windowSize.Height); - - context.SetPreferredSizeFit(windowSize, 0.6); - context.TitlebarOverlap = true; - if (!hasVideo) + if (!probe.HasVideo()) { context.CanResize = false; context.TitlebarAutoHide = false; @@ -80,52 +83,47 @@ namespace QuickLook.Plugin.VideoViewer { context.TitlebarAutoHide = true; context.UseDarkTheme = true; + context.CanResize = true; + context.TitlebarAutoHide = true; + context.TitlebarBlurVisibility = true; + context.TitlebarColourVisibility = true; } + + var windowSize = probe.GetViewSize() == Size.Empty ? 
def : probe.GetViewSize(); + windowSize.Width = Math.Max(def.Width, windowSize.Width); + windowSize.Height = Math.Max(def.Height, windowSize.Height); + + context.SetPreferredSizeFit(windowSize, 0.6); + context.TitlebarOverlap = true; } public void View(string path, ContextObject context) { - _vp = new ViewerPanel(context); + _vp = new ViewerPanel(context, probe.HasVideo()); context.ViewerContent = _vp; - _vp.mediaElement.VlcMediaPlayer.Opening += MarkReady; - + _vp.mediaElement.MediaOpened += MediaElement_MediaOpened; _vp.LoadAndPlay(path); context.Title = $"{Path.GetFileName(path)}"; + + } + + private void MediaElement_MediaOpened(object sender, RoutedEventArgs e) + { + _context.IsBusy = false; } public void Cleanup() { - if (_vp != null) - _vp.mediaElement.VlcMediaPlayer.Opening -= MarkReady; + if (_vp?.mediaElement != null) + _vp.mediaElement.MediaOpened -= MediaElement_MediaOpened; + _vp?.Dispose(); _vp = null; _context = null; } - - private void MarkReady(object sender, ObjectEventArgs e) - { - _context.IsBusy = false; - } - - private bool HasVideoStream(string path, out Size size) - { - using (var vlc = new Vlc(VlcSettings.VlcOptions)) - { - using (var media = vlc.CreateMediaFromPath(path)) - { - media.Parse(); - var tracks = media.GetTracks(); - var video = tracks.FirstOrDefault(mt => mt.Type == TrackType.Video) as VideoTrack; - - size = video == null ? 
Size.Empty : new Size(video.Width, video.Height); - - return video != null; - } - } - } } } \ No newline at end of file diff --git a/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/QuickLook.Plugin.VideoViewer.csproj b/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/QuickLook.Plugin.VideoViewer.csproj index c1dbaaa..8b96fe0 100644 --- a/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/QuickLook.Plugin.VideoViewer.csproj +++ b/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/QuickLook.Plugin.VideoViewer.csproj @@ -59,11 +59,11 @@ key.snk - - .\Meta.Vlc.dll + + References\ffme.dll - - .\Meta.Vlc.Wpf.dll + + ..\..\packages\taglib.2.1.0.0\lib\policy.2.0.taglib-sharp.dll @@ -71,12 +71,16 @@ 4.0 + + + ..\..\packages\taglib.2.1.0.0\lib\taglib-sharp.dll + - + MSBuild:Compile Designer @@ -100,6 +104,7 @@ Code + diff --git a/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/References/FFmpeg.AutoGen.dll b/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/References/FFmpeg.AutoGen.dll new file mode 100644 index 0000000..4c1a775 Binary files /dev/null and b/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/References/FFmpeg.AutoGen.dll differ diff --git a/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/References/FFmpeg.AutoGen.xml b/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/References/FFmpeg.AutoGen.xml new file mode 100644 index 0000000..97d17fc --- /dev/null +++ b/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/References/FFmpeg.AutoGen.xml @@ -0,0 +1,6198 @@ + + + + FFmpeg.AutoGen + + + + Message types used by avdevice_app_to_dev_control_message(). + + + Dummy message. + + + Window size change message. + + + Repaint request message. + + + Request pause/play. + + + Request pause/play. + + + Request pause/play. + + + Volume control message. + + + Mute control messages. + + + Mute control messages. + + + Mute control messages. + + + Get volume/mute messages. + + + Get volume/mute messages. + + + Not part of ABI + + + Location of chroma samples. 
+ + + MPEG-2/4 4:2:0, H.264 default for 4:2:0 + + + MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0 + + + ITU-R 601, SMPTE 274M 296M S314M(DV 4:1:1), mpeg2 4:2:2 + + + Not part of ABI + + + not part of ABI/API + + + Identify the syntax and semantics of the bitstream. The principle is roughly: Two decoders with the same ID can decode the same streams. Two encoders with the same ID can encode compatible streams. There may be slight deviations from the principle due to implementation details. + + + preferred ID for MPEG-1/2 video decoding + + + A dummy id pointing at the start of audio codecs + + + preferred ID for decoding MPEG audio layer 1, 2 or 3 + + + as in Berlin toast format + + + A dummy ID pointing at the start of subtitle codecs. + + + raw UTF-8 text + + + A dummy ID pointing at the start of various fake codecs. + + + Contain timestamp estimated through PCR of program stream. + + + codec_id is not known (like AV_CODEC_ID_NONE) but lavf should attempt to identify it + + + _FAKE_ codec to indicate a raw MPEG-2 TS stream (only used by libavformat) + + + _FAKE_ codec to indicate a MPEG-4 Systems stream (only used by libavformat) + + + Dummy codec for streams containing only metadata information. + + + Passthrough codec, AVFrames wrapped in AVPacket + + + Chromaticity coordinates of the source primaries. + + + also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP177 Annex B + + + also FCC Title 47 Code of Federal Regulations 73.682 (a)(20) + + + also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM + + + also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC + + + functionally identical to above + + + colour filters using Illuminant C + + + ITU-R BT2020 + + + SMPTE ST 428-1 (CIE 1931 XYZ) + + + SMPTE ST 431-2 (2011) / DCI P3 + + + SMPTE ST 432-1 (2010) / P3 D65 / Display P3 + + + JEDEC P22 phosphors + + + Not part of ABI + + + MPEG vs JPEG YUV range. 
+ + + the normal 219*2^(n-8) "MPEG" YUV ranges + + + the normal 2^n-1 "JPEG" YUV ranges + + + Not part of ABI + + + YUV colorspace type. + + + order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB) + + + also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B + + + FCC Title 47 Code of Federal Regulations 73.682 (a)(20) + + + also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 + + + also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC + + + functionally identical to above + + + Used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16 + + + ITU-R BT2020 non-constant luminance system + + + ITU-R BT2020 constant luminance system + + + SMPTE 2085, Y'D'zD'x + + + Not part of ABI + + + Color Transfer Characteristic. + + + also ITU-R BT1361 + + + also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM + + + also ITU-R BT470BG + + + also ITU-R BT601-6 525 or 625 / ITU-R BT1358 525 or 625 / ITU-R BT1700 NTSC + + + "Linear transfer characteristics" + + + "Logarithmic transfer characteristic (100:1 range)" + + + "Logarithmic transfer characteristic (100 * Sqrt(10) : 1 range)" + + + IEC 61966-2-4 + + + ITU-R BT1361 Extended Colour Gamut + + + IEC 61966-2-1 (sRGB or sYCC) + + + ITU-R BT2020 for 10-bit system + + + ITU-R BT2020 for 12-bit system + + + SMPTE ST 2084 for 10-, 12-, 14- and 16-bit systems + + + SMPTE ST 428-1 + + + ARIB STD-B67, known as "Hybrid log-gamma" + + + Not part of ABI + + + Message types used by avdevice_dev_to_app_control_message(). + + + Dummy message. + + + Create window buffer message. + + + Prepare window buffer message. + + + Display window buffer message. + + + Destroy window buffer message. + + + Buffer fullness status messages. + + + Buffer fullness status messages. + + + Buffer readable/writable. + + + Buffer readable/writable. + + + Mute state change message. + + + Volume level change message. 
+ + + discard nothing + + + discard useless packets like 0 size packets in avi + + + discard all non reference + + + discard all bidirectional frames + + + discard all non intra frames + + + discard all frames except keyframes + + + discard all + + + The duration of a video can be estimated through various ways, and this enum can be used to know how the duration was estimated. + + + Duration accurately estimated from PTSes + + + Duration estimated from a stream with a known duration + + + Duration estimated from bitrate (less accurate) + + + stage of the initialization of the link properties (dimensions, etc) + + + not started + + + started, but incomplete + + + complete + + + @{ AVFrame is an abstraction for reference-counted raw multimedia data. + + + The data is the AVPanScan struct defined in libavcodec. + + + ATSC A53 Part 4 Closed Captions. A53 CC bitstream is stored as uint8_t in AVFrameSideData.data. The number of bytes of CC data is AVFrameSideData.size. + + + Stereoscopic 3d metadata. The data is the AVStereo3D struct defined in libavutil/stereo3d.h. + + + The data is the AVMatrixEncoding enum defined in libavutil/channel_layout.h. + + + Metadata relevant to a downmix procedure. The data is the AVDownmixInfo struct defined in libavutil/downmix_info.h. + + + ReplayGain information in the form of the AVReplayGain struct. + + + This side data contains a 3x3 transformation matrix describing an affine transformation that needs to be applied to the frame for correct presentation. + + + Active Format Description data consisting of a single byte as specified in ETSI TS 101 154 using AVActiveFormatDescription enum. + + + Motion vectors exported by some codecs (on demand through the export_mvs flag set in the libavcodec AVCodecContext flags2 option). The data is the AVMotionVector struct defined in libavutil/motion_vector.h. + + + Recommmends skipping the specified number of samples. This is exported only if the "skip_manual" AVOption is set in libavcodec. 
This has the same format as AV_PKT_DATA_SKIP_SAMPLES. + + + This side data must be associated with an audio frame and corresponds to enum AVAudioServiceType defined in avcodec.h. + + + Mastering display metadata associated with a video frame. The payload is an AVMasteringDisplayMetadata type and contains information about the mastering display color volume. + + + The GOP timecode in 25 bit timecode format. Data format is 64-bit integer. This is set on the first frame of a GOP that has a temporal reference of 0. + + + The data represents the AVSphericalMapping structure defined in libavutil/spherical.h. + + + Transfer the data from the queried hw frame. + + + Transfer the data to the queried hw frame. + + + Different data types that can be returned via the AVIO write_data_type callback. + + + Header data; this needs to be present for the stream to be decodeable. + + + A point in the output bytestream where a decoder can start decoding (i.e. a keyframe). A demuxer/decoder given the data flagged with AVIO_DATA_MARKER_HEADER, followed by any AVIO_DATA_MARKER_SYNC_POINT, should give decodeable results. + + + A point in the output bytestream where a demuxer can start parsing (for non self synchronizing bytestream formats). That is, any non-keyframe packet start point. + + + This is any, unlabelled data. It can either be a muxer not marking any positions at all, it can be an actual boundary/sync point that the muxer chooses not to mark, or a later part of a packet/fragment that is cut into multiple write callbacks due to limited IO buffer size. + + + Trailer data, which doesn't contain actual content, but only for finalizing the output file. + + + Directory entry types. 
+ + + Lock operation used by lockmgr + + + Create a mutex + + + Lock the mutex + + + Unlock the mutex + + + Free mutex resources + + + Media Type + + + Usually treated as AVMEDIA_TYPE_DATA + + + Opaque data information usually continuous + + + Opaque data information usually sparse + + + @{ AVOptions provide a generic system to declare options on arbitrary structs ("objects"). An option can have a help text, a type and a range of possible values. Options may then be enumerated, read and written to. + + + offset must point to a pointer immediately followed by an int for the length + + + offset must point to two consecutive integers + + + offset must point to AVRational + + + Types and functions for working with AVPacket. @{ + + + An AV_PKT_DATA_PALETTE side data packet contains exactly AVPALETTE_SIZE bytes worth of palette. This side data signals that a new palette is present. + + + The AV_PKT_DATA_NEW_EXTRADATA is used to notify the codec or the format that the extradata buffer was changed and the receiving side should act upon it appropriately. The new extradata is embedded in the side data buffer and should be immediately used for processing the current frame or packet. + + + An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows: + + + An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of structures with info about macroblocks relevant to splitting the packet into smaller packets on macroblock edges (e.g. as for RFC 2190). That is, it does not necessarily contain info about all macroblocks, as long as the distance between macroblocks in the info is smaller than the target payload size. Each MB info structure is 12 bytes, and is laid out as follows: + + + This side data should be associated with an audio stream and contains ReplayGain information in form of the AVReplayGain struct. 
+ + + This side data contains a 3x3 transformation matrix describing an affine transformation that needs to be applied to the decoded video frames for correct presentation. + + + This side data should be associated with a video stream and contains Stereoscopic 3D information in form of the AVStereo3D struct. + + + This side data should be associated with an audio stream and corresponds to enum AVAudioServiceType. + + + This side data contains quality related information from the encoder. + + + This side data contains an integer value representing the stream index of a "fallback" track. A fallback track indicates an alternate track to use when the current track can not be decoded for some reason. e.g. no decoder available for codec. + + + This side data corresponds to the AVCPBProperties struct. + + + Recommmends skipping the specified number of samples + + + An AV_PKT_DATA_JP_DUALMONO side data packet indicates that the packet may contain "dual mono" audio specific to Japanese DTV and if it is true, recommends only the selected channel to be used. + + + A list of zero terminated key/value strings. There is no end marker for the list, so it is required to rely on the side data size to stop. + + + Subtitle event position + + + Data found in BlockAdditional element of matroska container. There is no end marker for the data, so it is required to rely on the side data size to recognize the end. 8 byte id (as found in BlockAddId) followed by data. + + + The optional first identifier line of a WebVTT cue. + + + The optional settings (rendering instructions) that immediately follow the timestamp specifier of a WebVTT cue. + + + A list of zero terminated key/value strings. There is no end marker for the list, so it is required to rely on the side data size to stop. This side data includes updated metadata which appeared in the stream. + + + MPEGTS stream ID, this is required to pass the stream ID information from the demuxer to the corresponding muxer. 
+ + + Mastering display metadata (based on SMPTE-2086:2014). This metadata should be associated with a video stream and containts data in the form of the AVMasteringDisplayMetadata struct. + + + This side data should be associated with a video stream and corresponds to the AVSphericalMapping structure. + + + The number of side data elements (in fact a bit more than it). This is not part of the public API/ABI in the sense that it may change when new side data types are added. This must stay the last enum value. If its value becomes huge, some code using it needs to be updated as it assumes it to be smaller than other limits. + + + @{ + + + @} @} + + + Undefined + + + Intra + + + Predicted + + + Bi-dir predicted + + + S(GMC)-VOP MPEG-4 + + + Switching Intra + + + Switching Predicted + + + BI type + + + Pixel format. + + + planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples) + + + packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr + + + packed RGB 8:8:8, 24bpp, RGBRGB... + + + packed RGB 8:8:8, 24bpp, BGRBGR... 
+ + + planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples) + + + planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples) + + + planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples) + + + planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) + + + Y , 8bpp + + + Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb + + + Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb + + + 8 bits with AV_PIX_FMT_RGB32 palette + + + planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting color_range + + + planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting color_range + + + planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting color_range + + + XVideo Motion Acceleration via common packet passing + + + packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1 + + + packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3 + + + packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb) + + + packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits + + + packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb) + + + packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb) + + + packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits + + + packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb) + + + planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V) + + + as above, but U and V bytes are swapped + + + packed ARGB 8:8:8:8, 32bpp, ARGBARGB... + + + packed RGBA 8:8:8:8, 32bpp, RGBARGBA... + + + packed ABGR 8:8:8:8, 32bpp, ABGRABGR... + + + packed BGRA 8:8:8:8, 32bpp, BGRABGRA... 
+ + + Y , 16bpp, big-endian + + + Y , 16bpp, little-endian + + + planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples) + + + planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range + + + planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples) + + + H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + + + MPEG-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + + + MPEG-2 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + + + WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + + + VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + + + packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian + + + packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian + + + packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian + + + packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian + + + packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), big-endian , X=unused/undefined + + + packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined + + + packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian + + + packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian + + + packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), big-endian , X=unused/undefined + + + packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), little-endian, 
X=unused/undefined + + + HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers + + + HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers + + + HW decoding through VA API, Picture.data[3] contains a VASurfaceID + + + @} + + + planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + + + planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + + + planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + + + planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + + + planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + + + planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + + + MPEG-4 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + + + HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer + + + packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), little-endian, X=unused/undefined + + + packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), big-endian, X=unused/undefined + + + packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), little-endian, X=unused/undefined + + + packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), big-endian, X=unused/undefined + + + 8 bits gray, 8 bits alpha + + + alias for AV_PIX_FMT_YA8 + + + alias for AV_PIX_FMT_YA8 + + + packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian + + + packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian + + + planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + + + planar YUV 4:2:0, 13.5bpp, (1 
Cr & Cb sample per 2x2 Y samples), little-endian + + + planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + + + planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + + + planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + + + planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + + + planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + + + planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + + + planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + + + planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + + + planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + + + planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + + + hardware decoding through VDA + + + planar GBR 4:4:4 24bpp + + + planar GBR 4:4:4 27bpp, big-endian + + + planar GBR 4:4:4 27bpp, little-endian + + + planar GBR 4:4:4 30bpp, big-endian + + + planar GBR 4:4:4 30bpp, little-endian + + + planar GBR 4:4:4 48bpp, big-endian + + + planar GBR 4:4:4 48bpp, little-endian + + + planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples) + + + planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples) + + + planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), big-endian + + + planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), little-endian + + + planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), big-endian + + + planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), little-endian + + + planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), big-endian + + + planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), little-endian + + + planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian) + + + planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, 
little-endian) + + + planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian) + + + planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian) + + + planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian) + + + planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian) + + + planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian) + + + planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian) + + + planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian) + + + planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian) + + + planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian) + + + planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian) + + + HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface + + + packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as little-endian, the 4 lower bits are set to 0 + + + packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as big-endian, the 4 lower bits are set to 0 + + + interleaved chroma YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples) + + + interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + + + interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + + + packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian + + + packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian + + + packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian + + + packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as 
little-endian + + + packed YUV 4:2:2, 16bpp, Y0 Cr Y1 Cb + + + HW acceleration through VDA, data[3] contains a CVPixelBufferRef + + + 16 bits gray, 16 bits alpha (big-endian) + + + 16 bits gray, 16 bits alpha (little-endian) + + + planar GBRA 4:4:4:4 32bpp + + + planar GBRA 4:4:4:4 64bpp, big-endian + + + planar GBRA 4:4:4:4 64bpp, little-endian + + + HW acceleration through QSV, data[3] contains a pointer to the mfxFrameSurface1 structure. + + + HW acceleration though MMAL, data[3] contains a pointer to the MMAL_BUFFER_HEADER_T structure. + + + HW decoding through Direct3D11, Picture.data[3] contains a ID3D11VideoDecoderOutputView pointer + + + HW acceleration through CUDA. data[i] contain CUdeviceptr pointers exactly as for system memory frames. + + + packed RGB 8:8:8, 32bpp, XRGBXRGB... X=unused/undefined + + + packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined + + + packed BGR 8:8:8, 32bpp, XBGRXBGR... X=unused/undefined + + + packed BGR 8:8:8, 32bpp, BGRXBGRX... X=unused/undefined + + + planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + + + planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + + + planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + + + planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + + + planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + + + planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + + + planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + + + planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + + + planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + + + planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + + + planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + + + planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + + + planar GBR 4:4:4 
36bpp, big-endian + + + planar GBR 4:4:4 36bpp, little-endian + + + planar GBR 4:4:4 42bpp, big-endian + + + planar GBR 4:4:4 42bpp, little-endian + + + planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV411P and setting color_range + + + bayer, BGBG..(odd line), GRGR..(even line), 8-bit samples */ + + + bayer, RGRG..(odd line), GBGB..(even line), 8-bit samples */ + + + bayer, GBGB..(odd line), RGRG..(even line), 8-bit samples */ + + + bayer, GRGR..(odd line), BGBG..(even line), 8-bit samples */ + + + bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, little-endian */ + + + bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, big-endian */ + + + bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, little-endian */ + + + bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, big-endian */ + + + bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, little-endian */ + + + bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, big-endian */ + + + bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, little-endian */ + + + bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, big-endian */ + + + planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian + + + planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per 1x2 Y samples), big-endian + + + planar YUV 4:4:0,24bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian + + + planar YUV 4:4:0,24bpp, (1 Cr & Cb sample per 1x2 Y samples), big-endian + + + packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y & A samples), little-endian + + + packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y & A samples), big-endian + + + hardware decoding through Videotoolbox + + + like NV12, with 10bpp per component, data in the high bits, zeros in the low bits, little-endian + + + like NV12, with 10bpp per component, data in the high bits, zeros in the low bits, big-endian + + + planar GBR 4:4:4:4 48bpp, 
big-endian + + + planar GBR 4:4:4:4 48bpp, little-endian + + + planar GBR 4:4:4:4 40bpp, big-endian + + + planar GBR 4:4:4:4 40bpp, little-endian + + + hardware decoding through MediaCodec + + + Y , 12bpp, big-endian + + + Y , 12bpp, little-endian + + + Y , 10bpp, big-endian + + + Y , 10bpp, little-endian + + + like NV12, with 16bpp per component, little-endian + + + like NV12, with 16bpp per component, big-endian + + + number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions + + + Rounding methods. + + + Round toward zero. + + + Round away from zero. + + + Round toward -infinity. + + + Round toward +infinity. + + + Round to nearest and halfway cases away from zero. + + + Flag telling rescaling functions to pass `INT64_MIN`/`MAX` through unchanged, avoiding special cases for #AV_NOPTS_VALUE. + + + Audio sample formats + + + unsigned 8 bits + + + signed 16 bits + + + signed 32 bits + + + float + + + double + + + unsigned 8 bits, planar + + + signed 16 bits, planar + + + signed 32 bits, planar + + + float, planar + + + double, planar + + + signed 64 bits + + + signed 64 bits, planar + + + Number of sample formats. DO NOT USE if linking dynamically + + + @} + + + full parsing and repack + + + Only parse headers, do not repack. + + + full parsing and interpolation of timestamps for frames not starting on a packet boundary + + + full parsing and repack of the first frame only, only implemented for H.264 currently + + + full parsing and repack with timestamp and position generation by parser for raw this assumes that each packet in the file contains no demuxer level headers and just codec level data, otherwise position generation would fail + + + A bitmap, pict will be set + + + Plain text, the text field must be set by the decoder and is authoritative. ass and pict fields may contain approximations. + + + Formatted text, the ass field must be set by the decoder and is authoritative. 
pict and text fields may contain approximations. + + + timecode is drop frame + + + timecode wraps after 24 hours + + + negative time values are allowed + + + no search, that is use 0,0 vector whenever one is needed + + + enhanced predictive zonal search + + + reserved for experiments + + + hexagon based search + + + uneven multi-hexagon search + + + transformed exhaustive search algorithm + + + iterative search + + + Dithering algorithms + + + not part of API/ABI + + + not part of API/ABI + + + Resampling Engines + + + SW Resampler + + + SoX Resampler + + + not part of API/ABI + + + Resampling Filter Types + + + Cubic + + + Blackman Nuttall windowed sinc + + + Kaiser windowed sinc + + + Free resample context. + a non-NULL pointer to a resample context previously created with av_audio_resample_init() + + + Initialize audio resampling context. + number of output channels + number of input channels + output sample rate + input sample rate + requested output sample format + input sample format + length of each FIR filter in the filterbank relative to the cutoff frequency + log2 of the number of entries in the polyphase filterbank + if 1 then the used FIR filter will be linearly interpolated between the 2 closest, if 0 the closest will be used + cutoff frequency, 1.0 corresponds to half the output sampling rate + + + Release bitstream filter context. + the bitstream filter context created with av_bitstream_filter_init(), can be NULL + + + Filter bitstream. + bitstream filter context created by av_bitstream_filter_init() + AVCodecContext accessed by the filter, may be NULL. If specified, this must point to the encoder context of the output stream the packet is sent to. 
+ arguments which specify the filter configuration, may be NULL + pointer which is updated to point to the filtered buffer + pointer which is updated to the filtered buffer size in bytes + buffer containing the data to filter + size in bytes of buf + set to non-zero if the buffer to filter corresponds to a key-frame packet data + + + Create and initialize a bitstream filter context given a bitstream filter name. + the name of the bitstream filter + + + If f is NULL, return the first registered bitstream filter, if f is non-NULL, return the next registered bitstream filter after f, or NULL if f is the last one. + + + Allocate a context for a given bitstream filter. The caller must fill in the context parameters as described in the documentation and then call av_bsf_init() before sending any data to the filter. + the filter for which to allocate an instance. + a pointer into which the pointer to the newly-allocated context will be written. It must be freed with av_bsf_free() after the filtering is done. + + + Free a bitstream filter context and everything associated with it; write NULL into the supplied pointer. + + + Returns a bitstream filter with the specified name or NULL if no such bitstream filter exists. + + + Get the AVClass for AVBSFContext. It can be used in combination with AV_OPT_SEARCH_FAKE_OBJ for examining options. + + + Get null/pass-through bitstream filter. + Pointer to be set to new instance of pass-through bitstream filter + + + Prepare the filter for use, after all the parameters and options have been set. + + + Allocate empty list of bitstream filters. The list must be later freed by av_bsf_list_free() or finalized by av_bsf_list_finalize(). + + + Append bitstream filter to the list of bitstream filters. + List to append to + Filter context to be appended + + + Construct new bitstream filter context given it's name and options and append it to the list of bitstream filters. 
+ List to append to + Name of the bitstream filter + Options for the bitstream filter, can be set to NULL + + + Finalize list of bitstream filters. + Filter list structure to be transformed + Pointer to be set to newly created + + + Free list of bitstream filters. + Pointer to pointer returned by av_bsf_list_alloc() + + + Parse string describing list of bitstream filters and create single Resulting allocated by av_bsf_alloc(). + String describing chain of bitstream filters in format `bsf1[=opt1=val1:opt2=val2][,bsf2]` + Pointer to be set to newly created + + + Iterate over all registered bitstream filters. + a pointer where libavcodec will store the iteration state. Must point to NULL to start the iteration. + + + Retrieve a filtered packet. + this struct will be filled with the contents of the filtered packet. It is owned by the caller and must be freed using av_packet_unref() when it is no longer needed. This parameter should be "clean" (i.e. freshly allocated with av_packet_alloc() or unreffed with av_packet_unref()) when this function is called. If this function returns successfully, the contents of pkt will be completely overwritten by the returned data. On failure, pkt is not touched. + + + Submit a packet for filtering. + the packet to filter. pkt must contain some payload (i.e data or side data must be present in pkt). The bitstream filter will take ownership of the packet and reset the contents of pkt. pkt is not touched if an error occurs. This parameter may be NULL, which signals the end of the stream (i.e. no more packets will be sent). That will cause the filter to output any packets it may have buffered internally. + + + Returns a non-zero number if codec is a decoder, zero otherwise + + + Returns a non-zero number if codec is an encoder, zero otherwise + + + If c is NULL, returns the first registered codec, if c is non-NULL, returns the next registered codec after c, or NULL if c is the last one. 
+ + + Copy packet, including contents + + + Copy packet side data + + + Allocate a CPB properties structure and initialize its fields to default values. + if non-NULL, the size of the allocated struct will be written here. This is useful for embedding it in side data. + + + Allocate an AVD3D11VAContext. + + + Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end which will always be 0. + + + Same behaviour av_fast_padded_malloc except that buffer will always be 0-initialized after call. + + + Free a packet. + packet to free + + + Return audio frame duration. + codec context + size of the frame, or 0 if unknown + + + This function is the same as av_get_audio_frame_duration(), except it works with AVCodecParameters instead of an AVCodecContext. + + + Return codec bits per sample. + the codec + + + Put a string representing the codec tag codec_tag in buf. + buffer to place codec tag in + size in bytes of buf + codec tag to assign + + + Return codec bits per sample. Only return non-zero if the bits per sample is exactly correct, not an approximation. + the codec + + + Return the PCM codec associated with a sample format. + endianness, 0 for little, 1 for big, -1 (or anything else) for native + + + Return a name for the specified profile, if available. + the codec that is searched for the given profile + the profile value for which a name is requested + + + Increase packet size, correctly zeroing padding + packet + number of bytes by which to increase the size of the packet + + + If hwaccel is NULL, returns the first registered hardware accelerator, if hwaccel is non-NULL, returns the next registered hardware accelerator after hwaccel, or NULL if hwaccel is the last one. + + + Initialize optional fields of a packet with default values. + packet + + + Register a user provided lock manager supporting the operations specified by AVLockOp. 
The "mutex" argument to the function points to a (void *) where the lockmgr should store/get a pointer to a user allocated mutex. It is NULL upon AV_LOCK_CREATE and equal to the value left by the last call for all other ops. If the lock manager is unable to perform the op then it should leave the mutex in the same state as when it was called and return a non-zero value. However, when called with AV_LOCK_DESTROY the mutex will always be assumed to have been successfully destroyed. If av_lockmgr_register succeeds it will return a non-negative value, if it fails it will return a negative value and destroy all mutex and unregister all callbacks. av_lockmgr_register is not thread-safe, it must be called from a single thread before any calls which make use of locking are used. + User defined callback. av_lockmgr_register invokes calls to this callback and the previously registered callback. The callback will be used to create more than one mutex each of which must be backed by its own underlying locking mechanism (i.e. do not use a single static object to implement your lock manager). If cb is set to NULL the lockmgr will be unregistered. + + + Log a generic warning message asking for a sample. This function is intended to be used internally by FFmpeg (libavcodec, libavformat, etc.) only, and would normally not be used by applications. + a pointer to an arbitrary struct of which the first field is a pointer to an AVClass struct + string containing an optional message, or NULL if no message + + + Log a generic warning message about a missing feature. This function is intended to be used internally by FFmpeg (libavcodec, libavformat, etc.) only, and would normally not be used by applications. + a pointer to an arbitrary struct of which the first field is a pointer to an AVClass struct + string containing the name of the missing feature + indicates if samples are wanted which exhibit this feature. 
If want_sample is non-zero, additional verbiage will be added to the log message which tells the user how to report samples to the development mailing list. + + + Allocate the payload of a packet and initialize its fields with default values. + packet + wanted payload size + + + Wrap an existing array as a packet side data. + packet + side information type + the side data array. It must be allocated with the av_malloc() family of functions. The ownership of the data is transferred to pkt. + side information size + + + Allocate an AVPacket and set its fields to default values. The resulting struct must be freed using av_packet_free(). + + + Create a new packet that references the same data as src. + + + Copy only "properties" fields from src to dst. + Destination packet + Source packet + + + Free the packet, if the packet is reference counted, it will be unreferenced first. + + + Convenience function to free all the side data stored. All the other fields stay untouched. + packet + + + Initialize a reference-counted packet from av_malloc()ed data. + packet to be initialized. This function will set the data, size, buf and destruct fields, all others are left untouched. + Data allocated by av_malloc() to be used as packet data. If this function returns successfully, the data is owned by the underlying AVBuffer. The caller may not access the data through other means. + size of data in bytes, without the padding. I.e. the full buffer size is assumed to be size + AV_INPUT_BUFFER_PADDING_SIZE. + + + Get side information from packet. + packet + desired side information type + pointer for side information size to store (optional) + + + Move every field in src to dst and reset src. + Destination packet + Source packet, will be reset + + + Allocate new information of a packet. + packet + side information type + side information size + + + Pack a dictionary for use in side_data. + The dictionary to pack. 
+ pointer to store the size of the returned data + + + Setup a new reference to the data described by a given packet + Destination packet + Source packet + + + Convert valid timing fields (timestamps / durations) in a packet from one timebase to another. Timestamps with unknown values (AV_NOPTS_VALUE) will be ignored. + packet on which the conversion will be performed + source timebase, in which the timing fields in pkt are expressed + destination timebase, to which the timing fields will be converted + + + Shrink the already allocated side data buffer + packet + side information type + new side information size + + + Unpack a dictionary from side_data. + data from side_data + size of the data + the metadata storage dictionary + + + Wipe the packet. + The packet to be unreferenced. + + + Returns 0 if the output buffer is a subset of the input, 1 if it is allocated and must be freed use AVBitStreamFilter + + + Parse a packet. + parser context. + codec context. + set to pointer to parsed buffer or NULL if not yet finished. + set to size of parsed buffer or zero if not yet finished. + input buffer. + buffer size in bytes without the padding. I.e. the full buffer size is assumed to be buf_size + AV_INPUT_BUFFER_PADDING_SIZE. To signal EOF, this should be 0 (so that the last frame can be output). + input presentation timestamp. + input decoding timestamp. + input byte position in stream. + + + Register a bitstream filter. + + + Register the hardware accelerator hwaccel. + + + Resample an array of samples using a previously configured context. + an array of unconsumed samples + the number of samples of src which have been consumed are returned here + the number of unconsumed samples available + the amount of space in samples available in dst + If this is 0 then the context will not be modified, that way several channels can be resampled with the same context. + + + Compensate samplerate/timestamp drift. 
The compensation is done by changing the resampler parameters, so no audible clicks or similar distortions occur + number of output samples which should be output less + distance in output samples over which the compensation should be performed + + + Initialize an audio resampler. Note, if either rate is not an integer then simply scale both rates up so they are. + length of each FIR filter in the filterbank relative to the cutoff freq + log2 of the number of entries in the polyphase filterbank + If 1 then the used FIR filter will be linearly interpolated between the 2 closest, if 0 the closest will be used + cutoff frequency, 1.0 corresponds to half the output sampling rate + + + Reduce packet size, correctly zeroing padding + packet + new size + + + Encode extradata length to a buffer. Used by xiph codecs. + buffer to write to; must be at least (v/255+1) bytes long + size of extradata in bytes + + + Modify width and height values so that they will result in a memory buffer that is acceptable for the codec if you do not use any horizontal padding. + + + Modify width and height values so that they will result in a memory buffer that is acceptable for the codec if you also ensure that all line sizes are a multiple of the respective linesize_align[i]. + + + Allocate an AVCodecContext and set its fields to default values. The resulting struct should be freed with avcodec_free_context(). + if non-NULL, allocate private data and initialize defaults for the given codec. It is illegal to then call avcodec_open2() with a different codec. If NULL, then the codec-specific defaults won't be initialized, which may result in suboptimal default settings (this is important mainly for encoders, e.g. libx264). + + + Converts swscale x/y chroma position to AVChromaLocation. + horizontal chroma sample position + vertical chroma sample position + + + Close a given AVCodecContext and free all the data associated with it (but not the AVCodecContext itself). 
+ + + Return the libavcodec build-time configuration. + + + Copy the settings of the source AVCodecContext into the destination AVCodecContext. The resulting destination codec context will be unopened, i.e. you are required to call avcodec_open2() before you can use this AVCodecContext to decode/encode video/audio data. + target codec context, should be initialized with avcodec_alloc_context3(NULL), but otherwise uninitialized + source codec context + + + Decode the audio frame of size avpkt->size from avpkt->data into frame. + the codec context + The AVFrame in which to store decoded audio samples. The decoder will allocate a buffer for the decoded frame by calling the AVCodecContext.get_buffer2() callback. When AVCodecContext.refcounted_frames is set to 1, the frame is reference counted and the returned reference belongs to the caller. The caller must release the frame using av_frame_unref() when the frame is no longer needed. The caller may safely write to the frame if av_frame_is_writable() returns 1. When AVCodecContext.refcounted_frames is set to 0, the returned reference belongs to the decoder and is valid only until the next call to this function or until closing or flushing the decoder. The caller may not write to it. + Zero if no frame could be decoded, otherwise it is non-zero. Note that this field being set to zero does not mean that an error has occurred. For decoders with AV_CODEC_CAP_DELAY set, no given decode call is guaranteed to produce a frame. + The input AVPacket containing the input buffer. At least avpkt->data and avpkt->size should be set. Some decoders might also require additional fields to be set. + + + Decode a subtitle message. Return a negative value on error, otherwise return the number of bytes used. If no subtitle could be decompressed, got_sub_ptr is zero. Otherwise, the subtitle is stored in *sub. Note that AV_CODEC_CAP_DR1 is not available for subtitle codecs. 
This is for simplicity, because the performance difference is expect to be negligible and reusing a get_buffer written for video codecs would probably perform badly due to a potentially very different allocation pattern. + the codec context + The Preallocated AVSubtitle in which the decoded subtitle will be stored, must be freed with avsubtitle_free if *got_sub_ptr is set. + Zero if no subtitle could be decompressed, otherwise, it is nonzero. + The input AVPacket containing the input buffer. + + + Decode the video frame of size avpkt->size from avpkt->data into picture. Some decoders may support multiple frames in a single AVPacket, such decoders would then just decode the first frame. + the codec context + The AVFrame in which the decoded video frame will be stored. Use av_frame_alloc() to get an AVFrame. The codec will allocate memory for the actual bitmap by calling the AVCodecContext.get_buffer2() callback. When AVCodecContext.refcounted_frames is set to 1, the frame is reference counted and the returned reference belongs to the caller. The caller must release the frame using av_frame_unref() when the frame is no longer needed. The caller may safely write to the frame if av_frame_is_writable() returns 1. When AVCodecContext.refcounted_frames is set to 0, the returned reference belongs to the decoder and is valid only until the next call to this function or until closing or flushing the decoder. The caller may not write to it. + Zero if no frame could be decompressed, otherwise, it is nonzero. + The input AVPacket containing the input buffer. You can create such packet with av_init_packet() and by then setting data and size, some decoders might in addition need other fields like flags &AV _PKT_FLAG_KEY. All decoders are designed to use the least fields possible. + + + The default callback for AVCodecContext.get_buffer2(). It is made public so it can be called by custom get_buffer2() implementations for decoders without AV_CODEC_CAP_DR1 set. 
+ + + Returns descriptor for given codec ID or NULL if no descriptor exists. + + + Returns codec descriptor with the given name or NULL if no such descriptor exists. + + + Iterate over all codec descriptors known to libavcodec. + previous descriptor. NULL to get the first descriptor. + + + Encode a frame of audio. + codec context + output AVPacket. The user can supply an output buffer by setting avpkt->data and avpkt->size prior to calling the function, but if the size of the user-provided data is not large enough, encoding will fail. If avpkt->data and avpkt->size are set, avpkt->destruct must also be set. All other AVPacket fields will be reset by the encoder using av_init_packet(). If avpkt->data is NULL, the encoder will allocate it. The encoder will set avpkt->size to the size of the output packet. + AVFrame containing the raw audio data to be encoded. May be NULL when flushing an encoder that has the AV_CODEC_CAP_DELAY capability set. If AV_CODEC_CAP_VARIABLE_FRAME_SIZE is set, then each frame can have any number of samples. If it is not set, frame->nb_samples must be equal to avctx->frame_size for all frames except the last. The final frame may be smaller than avctx->frame_size. + This field is set to 1 by libavcodec if the output packet is non-empty, and to 0 if it is empty. If the function returns an error, the packet can be assumed to be invalid, and the value of got_packet_ptr is undefined and should not be used. + + + Encode a frame of video. + codec context + output AVPacket. The user can supply an output buffer by setting avpkt->data and avpkt->size prior to calling the function, but if the size of the user-provided data is not large enough, encoding will fail. All other AVPacket fields will be reset by the encoder using av_init_packet(). If avpkt->data is NULL, the encoder will allocate it. The encoder will set avpkt->size to the size of the output packet. The returned data (if any) belongs to the caller, he is responsible for freeing it. 
+ AVFrame containing the raw video data to be encoded. May be NULL when flushing an encoder that has the AV_CODEC_CAP_DELAY capability set. + This field is set to 1 by libavcodec if the output packet is non-empty, and to 0 if it is empty. If the function returns an error, the packet can be assumed to be invalid, and the value of got_packet_ptr is undefined and should not be used. + + + Converts AVChromaLocation to swscale x/y chroma position. + horizontal chroma sample position + vertical chroma sample position + + + Fill AVFrame audio data and linesize pointers. + the AVFrame frame->nb_samples must be set prior to calling the function. This function fills in frame->data, frame->extended_data, frame->linesize[0]. + channel count + sample format + buffer to use for frame data + size of buffer + plane size sample alignment (0 = default) + + + Find the best pixel format to convert to given a certain source pixel format. When converting from one pixel format to another, information loss may occur. For example, when converting from RGB24 to GRAY, the color information will be lost. Similarly, other losses occur when converting from some formats to other formats. avcodec_find_best_pix_fmt_of_2() searches which of the given pixel formats should be used to suffer the least amount of loss. The pixel formats from which it chooses one, are determined by the pix_fmt_list parameter. + AV_PIX_FMT_NONE terminated array of pixel formats to choose from + source pixel format + Whether the source pixel format alpha channel is used. + Combination of flags informing you what kind of losses will occur. + + + Find a registered decoder with a matching codec ID. + AVCodecID of the requested decoder + + + Find a registered decoder with the specified name. + name of the requested decoder + + + Find a registered encoder with a matching codec ID. + AVCodecID of the requested encoder + + + Find a registered encoder with the specified name. 
+ name of the requested encoder + + + Reset the internal decoder state / flush internal buffers. Should be called e.g. when seeking or when switching to a different stream. + + + Free the codec context and everything associated with it and write NULL to the provided pointer. + + + Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor. + the pixel format + store log2_chroma_w + store log2_chroma_h + + + Get the AVClass for AVCodecContext. It can be used in combination with AV_OPT_SEARCH_FAKE_OBJ for examining options. + + + Return the amount of padding in pixels which the get_buffer callback must provide around the edge of the image for codecs which do not have the CODEC_FLAG_EMU_EDGE flag. + + + Get the AVClass for AVFrame. It can be used in combination with AV_OPT_SEARCH_FAKE_OBJ for examining options. + + + Get the name of a codec. + + + Get the AVClass for AVSubtitleRect. It can be used in combination with AV_OPT_SEARCH_FAKE_OBJ for examining options. + + + Get the type of the given codec. + + + Returns a positive value if s is open (i.e. avcodec_open2() was called on it with no corresponding avcodec_close()), 0 otherwise. + + + Return the libavcodec license. + + + Initialize the AVCodecContext to use the given AVCodec. Prior to using this function the context has to be allocated with avcodec_alloc_context3(). + The context to initialize. + The codec to open this context for. If a non-NULL codec has been previously passed to avcodec_alloc_context3() or for this context, then this parameter MUST be either NULL or equal to the previously passed codec. + A dictionary filled with AVCodecContext and codec-private options. On return this object will be filled with options that were not found. + + + Allocate a new AVCodecParameters and set its fields to default values (unknown/invalid/0). The returned struct must be freed with avcodec_parameters_free(). + + + Copy the contents of src to dst. 
Any allocated fields in dst are freed and replaced with newly allocated duplicates of the corresponding fields in src. + + + Free an AVCodecParameters instance and everything associated with it and write NULL to the supplied pointer. + + + Fill the parameters struct based on the values from the supplied codec context. Any allocated fields in par are freed and replaced with duplicates of the corresponding fields in codec. + + + Fill the codec context based on the values from the supplied codec parameters. Any allocated fields in codec that have a corresponding field in par are freed and replaced with duplicates of the corresponding field in par. Fields in codec that do not have a counterpart in par are not touched. + + + Return a value representing the fourCC code associated to the pixel format pix_fmt, or 0 if no associated fourCC code can be found. + + + Return a name for the specified profile, if available. + the ID of the codec to which the requested profile belongs + the profile value for which a name is requested + + + Return decoded output data from a decoder. + codec context + This will be set to a reference-counted video or audio frame (depending on the decoder type) allocated by the decoder. Note that the function will always call av_frame_unref(frame) before doing anything else. + + + Read encoded data from the encoder. + codec context + This will be set to a reference-counted packet allocated by the encoder. Note that the function will always call av_frame_unref(frame) before doing anything else. + + + Register the codec codec and initialize libavcodec. + + + Register all the codecs, parsers and bitstream filters which were enabled at configuration time. If you do not call this function you can select exactly which formats you want to support, by using the individual registration functions. + + + Supply a raw video or audio frame to the encoder. Use avcodec_receive_packet() to retrieve buffered output packets. 
+ codec context + AVFrame containing the raw audio or video frame to be encoded. Ownership of the frame remains with the caller, and the encoder will not write to the frame. The encoder may create a reference to the frame data (or copy it if the frame is not reference-counted). It can be NULL, in which case it is considered a flush packet. This signals the end of the stream. If the encoder still has packets buffered, it will return them after this call. Once flushing mode has been entered, additional flush packets are ignored, and sending frames will return AVERROR_EOF. + + + Supply raw packet data as input to a decoder. + codec context + The input AVPacket. Usually, this will be a single video frame, or several complete audio frames. Ownership of the packet remains with the caller, and the decoder will not write to the packet. The decoder may create a reference to the packet data (or copy it if the packet is not reference-counted). Unlike with older APIs, the packet is always fully consumed, and if it contains multiple frames (e.g. some audio codecs), will require you to call avcodec_receive_frame() multiple times afterwards before you can send a new packet. It can be NULL (or an AVPacket with data set to NULL and size set to 0); in this case, it is considered a flush packet, which signals the end of the stream. Sending the first flush packet will return success. Subsequent ones are unnecessary and will return AVERROR_EOF. If the decoder still has frames buffered, it will return them after sending a flush packet. + + + Return the LIBAVCODEC_VERSION_INT constant. + + + Free all allocated data in the given subtitle struct. + AVSubtitle to free. + + + Audio input devices iterator. + + + Video input devices iterator. + + + Audio output devices iterator. + + + Video output devices iterator. + + + Send control message from application to device. + device context. + message type. + message data. Exact type depends on message type. + size of message data. 
+ + + Initialize capabilities probing API based on AVOption API. + Device capabilities data. Pointer to a NULL pointer must be passed. + Context of the device. + An AVDictionary filled with device-private options. On return this parameter will be destroyed and replaced with a dict containing options that were not found. May be NULL. The same options must be passed later to avformat_write_header() for output devices or avformat_open_input() for input devices, or at any other place that affects device-private options. + + + Free resources created by avdevice_capabilities_create() + Device capabilities data to be freed. + Context of the device. + + + Return the libavdevice build-time configuration. + + + Send control message from device to application. + device context. + message type. + message data. Can be NULL. + size of message data. + + + Convenient function to free result of avdevice_list_devices(). + + + Return the libavdevice license. + + + List devices. + device context. + list of autodetected devices. + + + List devices. + device format. May be NULL if device name is set. + device name. May be NULL if device format is set. + An AVDictionary filled with device-private options. May be NULL. The same options must be passed later to avformat_write_header() for output devices or avformat_open_input() for input devices, or at any other place that affects device-private options. + list of autodetected devices + + + Initialize libavdevice and register all the input and output devices. + + + Return the LIBAVDEVICE_VERSION_INT constant. + + + Create an AVABufferSinkParams structure. + + + Get a frame with filtered data from sink and put it in frame. + pointer to a context of a buffersink or abuffersink AVFilter. + pointer to an allocated frame that will be filled with data. The data must be freed using av_frame_unref() / av_frame_free() + + + Get a frame with filtered data from sink and put it in frame. + pointer to a buffersink or abuffersink filter context. 
+ pointer to an allocated frame that will be filled with data. The data must be freed using av_frame_unref() / av_frame_free() + a combination of AV_BUFFERSINK_FLAG_* flags + + + Same as av_buffersink_get_frame(), but with the ability to specify the number of samples read. This function is less efficient than av_buffersink_get_frame(), because it copies the data around. + pointer to a context of the abuffersink AVFilter. + pointer to an allocated frame that will be filled with data. The data must be freed using av_frame_unref() / av_frame_free() frame will contain exactly nb_samples audio samples, except at the end of stream, when it can contain less than nb_samples. + + + Get the properties of the stream @{ + + + Create an AVBufferSinkParams structure. + + + Set the frame size for an audio buffer sink. + + + Add a frame to the buffer source. + an instance of the buffersrc filter + frame to be added. If the frame is reference counted, this function will take ownership of the reference(s) and reset the frame. Otherwise the frame data will be copied. If this function returns an error, the input frame is not touched. + + + Add a frame to the buffer source. + pointer to a buffer source context + a frame, or NULL to mark EOF + a combination of AV_BUFFERSRC_FLAG_* + + + Get the number of failed requests. + + + Allocate a new AVBufferSrcParameters instance. It should be freed by the caller with av_free(). + + + Initialize the buffersrc or abuffersrc filter with the provided parameters. This function may be called multiple times, the later calls override the previous ones. Some of the parameters may also be set through AVOptions, then whatever method is used last takes precedence. + an instance of the buffersrc or abuffersrc filter + the stream parameters. The frames later passed to this filter must conform to those parameters. All the allocated fields in param remain owned by the caller, libavfilter will make internal copies or references when necessary. 
+ + + Add a frame to the buffer source. + an instance of the buffersrc filter + frame to be added. If the frame is reference counted, this function will make a new reference to it. Otherwise the frame data will be copied. + + + If filter is NULL, returns a pointer to the first registered filter pointer, if filter is non-NULL, returns the next pointer after filter. If the returned pointer points to NULL, the last registered filter was already reached. + + + Negotiate the media format, dimensions, etc of all inputs to a filter. + the filter to negotiate the properties for its inputs + + + Return the libavfilter build-time configuration. + + + Free a filter context. This will also remove the filter from its filtergraph's list of filters. + the filter to free + + + Returns AVClass for AVFilterContext. + + + Add an existing filter instance to a filter graph. + the filter graph + the filter to be added + + + Allocate a filter graph. + + + Create a new filter instance in a filter graph. + graph in which the new filter will be used + the filter to create an instance of + Name to give to the new instance (will be copied to AVFilterContext.name). This may be used by the caller to identify different filters, libavfilter itself assigns no semantics to this parameter. May be NULL. + + + Check validity and configure all the links and formats in the graph. + the filter graph + context used for logging + + + Create and add a filter instance into an existing graph. The filter instance is created from the filter filt and inited with the parameters args and opaque. + the instance name to give to the created filter instance + the filter graph + + + Dump a graph into a human-readable string representation. + the graph to dump + formatting options; currently ignored + + + Free a graph, destroy its links, and set *graph to NULL. If *graph is NULL, do nothing. + + + Get a filter instance identified by instance name from graph. + filter graph to search through. 
+ filter instance name (should be unique in the graph). + + + Add a graph described by a string to a graph. + the filter graph where to link the parsed graph context + string to be parsed + linked list to the inputs of the graph + linked list to the outputs of the graph + + + Add a graph described by a string to a graph. + the filter graph where to link the parsed graph context + string to be parsed + pointer to a linked list to the inputs of the graph, may be NULL. If non-NULL, *inputs is updated to contain the list of open inputs after the parsing, should be freed with avfilter_inout_free(). + pointer to a linked list to the outputs of the graph, may be NULL. If non-NULL, *outputs is updated to contain the list of open outputs after the parsing, should be freed with avfilter_inout_free(). + + + Add a graph described by a string to a graph. + the filter graph where to link the parsed graph context + string to be parsed + a linked list of all free (unlinked) inputs of the parsed graph will be returned here. It is to be freed by the caller using avfilter_inout_free(). + a linked list of all free (unlinked) outputs of the parsed graph will be returned here. It is to be freed by the caller using avfilter_inout_free(). + + + Queue a command for one or more filter instances. + the filter graph + the filter(s) to which the command should be sent "all" sends to all filters otherwise it can be a filter or filter instance name which will send the command to all matching filters. + the command to sent, for handling simplicity all commands must be alphanumeric only + the argument for the command + time at which the command should be sent to the filter + + + Request a frame on the oldest sink link. + + + Send a command to one or more filter instances. + the filter graph + the filter(s) to which the command should be sent "all" sends to all filters otherwise it can be a filter or filter instance name which will send the command to all matching filters. 
+ the command to send, for handling simplicity all commands must be alphanumeric only + the argument for the command + a buffer with size res_size where the filter(s) can return a response. + + + Enable or disable automatic format conversion inside the graph. + any of the AVFILTER_AUTO_CONVERT_* constants + + + Initialize a filter with the supplied dictionary of options. + uninitialized filter context to initialize + An AVDictionary filled with options for this filter. On return this parameter will be destroyed and replaced with a dict containing options that were not found. This dictionary must be freed by the caller. May be NULL, then this function is equivalent to avfilter_init_str() with the second parameter set to NULL. + + + Initialize a filter. + the filter to initialize + A string of parameters to use when initializing the filter. The format and meaning of this string varies by filter. + Any extra non-string data needed by the filter. The meaning of this parameter varies by filter. + + + Initialize a filter with the supplied parameters. + uninitialized filter context to initialize + Options to initialize the filter with. This must be a ':'-separated list of options in the 'key=value' form. May be NULL if the options have been set directly using the AVOptions API or there are no options that need to be set. + + + Allocate a single AVFilterInOut entry. Must be freed with avfilter_inout_free(). + + + Free the supplied list of AVFilterInOut and set *inout to NULL. If *inout is NULL, do nothing. + + + Insert a filter in the middle of an existing link. + the link into which the filter should be inserted + the filter to be inserted + the input pad on the filter to connect + the output pad on the filter to connect + + + Return the libavfilter license. + + + Link two filters together. 
+ the source filter + index of the output pad on the source filter + the destination filter + index of the input pad on the destination filter + + + Free the link in *link, and set its pointer to NULL. + + + Get the number of channels of a link. + + + Set the closed field of a link. + + + Iterate over all registered filters. + + + Create a filter instance. + put here a pointer to the created filter context on success, NULL on failure + the filter to create an instance of + Name to give to the new instance. Can be NULL for none. + + + Get the number of elements in a NULL-terminated array of AVFilterPads (e.g. AVFilter.inputs/outputs). + + + Get the name of an AVFilterPad. + an array of AVFilterPads + index of the pad in the array it; is the caller's responsibility to ensure the index is valid + + + Get the type of an AVFilterPad. + an array of AVFilterPads + index of the pad in the array; it is the caller's responsibility to ensure the index is valid + + + Make the filter instance process a command. It is recommended to use avfilter_graph_send_command(). + + + Register a filter. This is only needed if you plan to use avfilter_get_by_name later to lookup the AVFilter structure by name. A filter can still by instantiated with avfilter_graph_alloc_filter even if it is not registered. + the filter to register + + + Initialize the filter system. Register all builtin filters. + + + Uninitialize the filter system. Unregister all filters. + + + Return the LIBAVFILTER_VERSION_INT constant. + + + Add an index entry into a sorted list. Update the entry if the list already contains it. + timestamp in the time base of the given stream + + + Read data and append it to the current content of the AVPacket. If pkt->size is 0 this is identical to av_get_packet. Note that this uses av_grow_packet and thus involves a realloc which is inefficient. Thus this function should only be used when there is no reasonable way to know (an upper bound of) the final size. 
+ associated IO context + packet + amount of data to read + + + Get the AVCodecID for the given codec tag tag. If no codec id is found returns AV_CODEC_ID_NONE. + list of supported codec_id-codec_tag pairs, as stored in AVInputFormat.codec_tag and AVOutputFormat.codec_tag + codec tag to match to a codec ID + + + Get the codec tag for the given codec id id. If no codec tag is found returns 0. + list of supported codec_id-codec_tag pairs, as stored in AVInputFormat.codec_tag and AVOutputFormat.codec_tag + codec ID to match to a codec tag + + + Get the codec tag for the given codec id. + list of supported codec_id - codec_tag pairs, as stored in AVInputFormat.codec_tag and AVOutputFormat.codec_tag + codec id that should be searched for in the list + A pointer to the found tag + + + Print detailed information about the input or output format, such as duration, bitrate, streams, container, programs, metadata, side data, codec and time base. + the context to analyze + index of the stream to dump information about + the URL to print, such as source or destination file + Select whether the specified context is an input(0) or output(1) + + + Check whether filename actually is a numbered sequence generator. + possible numbered sequence string + + + Find the "best" stream in the file. The best stream is determined according to various heuristics as the most likely to be what the user expects. If the decoder parameter is non-NULL, av_find_best_stream will find the default decoder for the stream's codec; streams for which no decoder can be found are ignored. + media file handle + stream type: video, audio, subtitles, etc. + user-requested stream number, or -1 for automatic selection + try to find a stream related (eg. in the same program) to this one, or -1 if none + if non-NULL, returns the decoder for the selected stream + flags; none are currently defined + + + Find AVInputFormat based on the short name of the input format. 
+ + + Find the programs which belong to a given stream. + media file handle + the last found program, the search will start after this program, or from the beginning if it is NULL + stream index + + + Returns the method used to set ctx->duration. + + + Accessors for some AVFormatContext fields. These used to be provided for ABI compatibility, and do not need to be used anymore. + + + This function will cause global side data to be injected in the next packet of each stream as well as after any subsequent seek. + + + Return in 'buf' the path with '%d' replaced by a number. + destination buffer + destination buffer size + numbered sequence string + frame number + AV_FRAME_FILENAME_FLAGS_* + + + Get timing information for the data currently output. The exact meaning of "currently output" depends on the format. It is mostly relevant for devices that have an internal buffer and/or work in real time. + media file handle + stream in the media file + DTS of the last packet output for the stream, in stream time_base units + absolute time when that packet whas output, in microsecond + + + Allocate and read the payload of a packet and initialize its fields with default values. + associated IO context + packet + desired payload size + + + Guess the codec ID based upon muxer and filename. + + + Return the output format in the list of registered output formats which best matches the provided parameters, or return NULL if there is no match. + if non-NULL checks if short_name matches with the names of the registered formats + if non-NULL checks if filename terminates with the extensions of the registered formats + if non-NULL checks if mime_type matches with the MIME type of the registered formats + + + Guess the frame rate, based on both the container and codec information. 
+ the format context which the stream is part of + the stream which the frame is part of + the frame for which the frame rate should be determined, may be NULL + + + Guess the sample aspect ratio of a frame, based on both the stream and the frame aspect ratio. + the format context which the stream is part of + the stream which the frame is part of + the frame with the aspect ratio to be determined + + + Send a nice hexadecimal dump of a buffer to the specified file stream. + The file stream pointer where the dump should be sent to. + buffer + buffer size + + + Send a nice hexadecimal dump of a buffer to the log. + A pointer to an arbitrary struct of which the first field is a pointer to an AVClass struct. + The importance level of the message, lower values signifying higher importance. + buffer + buffer size + + + If f is NULL, returns the first registered input format, if f is non-NULL, returns the next registered input format after f or NULL if f is the last one. + + + Get the index for a specific timestamp. + stream that the timestamp belongs to + timestamp to retrieve the index for + if AVSEEK_FLAG_BACKWARD then the returned index will correspond to the timestamp which is < = the requested one, if backward is 0, then it will be >= if AVSEEK_FLAG_ANY seek to any frame, only keyframes otherwise + + + Write a packet to an output media file ensuring correct interleaving. + media file handle + The packet containing the data to be written. If the packet is reference-counted, this function will take ownership of this reference and unreference it later when it sees fit. The caller must not access the data through this reference after this function returns. If the packet is not reference-counted, libavformat will make a copy. This parameter can be NULL (at any time, not just at the end), to flush the interleaving queues. Packet's + + + Write an uncoded frame to an output media file. 
+ + + Return a positive value if the given filename has one of the given extensions, 0 otherwise. + file name to check against the given extensions + a comma-separated list of filename extensions + + + If f is NULL, returns the first registered output format, if f is non-NULL, returns the next registered output format after f or NULL if f is the last one. + + + Send a nice dump of a packet to the log. + A pointer to an arbitrary struct of which the first field is a pointer to an AVClass struct. + The importance level of the message, lower values signifying higher importance. + packet to dump + True if the payload must be displayed, too. + AVStream that the packet belongs to + + + Send a nice dump of a packet to the specified file stream. + The file stream pointer where the dump should be sent to. + packet to dump + True if the payload must be displayed, too. + AVStream that the packet belongs to + + + Like av_probe_input_buffer2() but returns 0 on success + + + Probe a bytestream to determine the input format. Each time a probe returns with a score that is too low, the probe buffer size is increased and another attempt is made. When the maximum probe size is reached, the input format with the highest score is returned. + the bytestream to probe + the input format is put here + the url of the stream + the log context + the offset within the bytestream to probe from + the maximum probe buffer size (zero for default) + + + Guess the file format. + data to be probed + Whether the file is already opened; determines whether demuxers with or without AVFMT_NOFILE are probed. + + + Guess the file format. + data to be probed + Whether the file is already opened; determines whether demuxers with or without AVFMT_NOFILE are probed. + A probe score larger that this is required to accept a detection, the variable is set to the actual detection score afterwards. If the score is < = AVPROBE_SCORE_MAX / 4 it is recommended to retry with a larger probe buffer. 
+ + + Guess the file format. + Whether the file is already opened; determines whether demuxers with or without AVFMT_NOFILE are probed. + The score of the best detection. + + + Return the next frame of a stream. This function returns what is stored in the file, and does not validate that what is there are valid frames for the decoder. It will split what is stored in the file into frames and return one for each call. It will not omit invalid data between valid frames so as to give the decoder the maximum information possible for decoding. + + + Pause a network-based stream (e.g. RTSP stream). + + + Start playing a network-based stream (e.g. RTSP stream) at the current position. + + + Initialize libavformat and register all the muxers, demuxers and protocols. If you do not call this function, then you can select exactly which formats you want to support. + + + Generate an SDP for an RTP session. + array of AVFormatContexts describing the RTP streams. If the array is composed by only one context, such context can contain multiple AVStreams (one AVStream per RTP stream). Otherwise, all the contexts in the array (an AVCodecContext per RTP stream) must contain only one AVStream. + number of AVCodecContexts contained in ac + buffer where the SDP will be stored (must be allocated by the caller) + the size of the buffer + + + Seek to the keyframe at timestamp. 'timestamp' in 'stream_index'. + media file handle + If stream_index is (-1), a default stream is selected, and timestamp is automatically converted from AV_TIME_BASE units to the stream specific time_base. + Timestamp in AVStream.time_base units or, if no stream is specified, in AV_TIME_BASE units. + flags which select direction and seeking mode + + + Wrap an existing array as stream side data. + stream + side information type + the side data array. It must be allocated with the av_malloc() family of functions. The ownership of the data is transferred to st. 
+ side information size + + + Get the internal codec timebase from a stream. + input stream to extract the timebase from + + + Returns the pts of the last muxed packet + its duration + + + Allocate new information from stream. + stream + desired side information type + side information size + + + Split a URL string into components. + the buffer for the protocol + the size of the proto buffer + the buffer for the authorization + the size of the authorization buffer + the buffer for the host name + the size of the hostname buffer + a pointer to store the port number in + the buffer for the path + the size of the path buffer + the URL to split + + + Write a packet to an output media file. + media file handle + The packet containing the data to be written. Note that unlike av_interleaved_write_frame(), this function does not take ownership of the packet passed to it (though some muxers may make an internal reference to the input packet). This parameter can be NULL (at any time, not just at the end), in order to immediately flush data buffered within the muxer, for muxers that buffer up data internally before writing it to the output. Packet's + + + Write the stream trailer to an output media file and free the file private data. + media file handle + + + Write an uncoded frame to an output media file. + + + Test whether a muxer supports uncoded frame. + + + Allocate an AVFormatContext. avformat_free_context() can be used to free the context and everything allocated by the framework within it. + + + Allocate an AVFormatContext for an output format. avformat_free_context() can be used to free the context and everything allocated by the framework within it. + format to use for allocating the context, if NULL format_name and filename are used instead + the name of output format to use for allocating the context, if NULL filename is used instead + the name of the filename to use for allocating the context, may be NULL + + + Close an opened input AVFormatContext. 
Free it and all its contents and set *s to NULL. + + + Return the libavformat build-time configuration. + + + Read packets of a media file to get stream information. This is useful for file formats with no headers such as MPEG. This function also computes the real framerate in case of MPEG-2 repeat frame mode. The logical file position is not changed by this function; examined packets may be buffered for later processing. + media file handle + If non-NULL, an ic.nb_streams long array of pointers to dictionaries, where i-th member contains options for codec corresponding to i-th stream. On return each dictionary will be filled with options that were not found. + + + Discard all internally buffered data. This can be useful when dealing with discontinuities in the byte stream. Generally works only with formats that can resync. This includes headerless formats like MPEG-TS/TS but should also work with NUT, Ogg and in a limited way AVI for example. + media file handle + + + Free an AVFormatContext and all its streams. + context to free + + + Get the AVClass for AVFormatContext. It can be used in combination with AV_OPT_SEARCH_FAKE_OBJ for examining options. + + + Returns the table mapping MOV FourCCs for audio to AVCodecID. + + + Returns the table mapping MOV FourCCs for video to libavcodec AVCodecID. + + + Returns the table mapping RIFF FourCCs for audio to AVCodecID. + + + @{ Get the tables mapping RIFF FourCCs to libavcodec AVCodecIDs. The tables are meant to be passed to av_codec_get_id()/av_codec_get_tag() as in the following code: + + + Allocate the stream private data and initialize the codec, but do not write the header. May optionally be used before avformat_write_header to initialize stream parameters before actually writing the header. If using this function, do not pass the same options to avformat_write_header. + Media file handle, must be allocated with avformat_alloc_context(). 
Its oformat field must be set to the desired output format; Its pb field must be set to an already opened AVIOContext. + An AVDictionary filled with AVFormatContext and muxer-private options. On return this parameter will be destroyed and replaced with a dict containing options that were not found. May be NULL. + + + Return the libavformat license. + + + Check if the stream st contained in s is matched by the stream specifier spec. + + + Undo the initialization done by avformat_network_init. + + + Do global initialization of network components. This is optional, but recommended, since it avoids the overhead of implicitly doing the setup for each session. + + + Add a new stream to a media file. + media file handle + If non-NULL, the AVCodecContext corresponding to the new stream will be initialized to use this codec. This is needed for e.g. codec-specific defaults to be set, so codec should be provided if it is known. + + + Open an input stream and read the header. The codecs are not opened. The stream must be closed with avformat_close_input(). + Pointer to user-supplied AVFormatContext (allocated by avformat_alloc_context). May be a pointer to NULL, in which case an AVFormatContext is allocated by this function and written into ps. Note that a user-supplied AVFormatContext will be freed on failure. + URL of the stream to open. + If non-NULL, this parameter forces a specific input format. Otherwise the format is autodetected. + A dictionary filled with AVFormatContext and demuxer-private options. On return this parameter will be destroyed and replaced with a dict containing options that were not found. May be NULL. + + + Test if the given container can store a codec. + container to check for compatibility + codec to potentially store in container + standards compliance level, one of FF_COMPLIANCE_* + + + Seek to timestamp ts. 
Seeking will be done so that the point from which all active streams can be presented successfully will be closest to ts and within min/max_ts. Active streams are all streams that have AVStream.discard < AVDISCARD_ALL. + media file handle + index of the stream which is used as time base reference + smallest acceptable timestamp + target timestamp + largest acceptable timestamp + flags + + + Transfer internal timing information from one stream to another. + target output format for ost + output stream which needs timings copy and adjustments + reference input stream to copy timings from + define from where the stream codec timebase needs to be imported + + + Return the LIBAVFORMAT_VERSION_INT constant. + + + Allocate the stream private data and write the stream header to an output media file. + Media file handle, must be allocated with avformat_alloc_context(). Its oformat field must be set to the desired output format; Its pb field must be set to an already opened AVIOContext. + An AVDictionary filled with AVFormatContext and muxer-private options. On return this parameter will be destroyed and replaced with a dict containing options that were not found. May be NULL. + + + Accept and allocate a client context on a server context. + the server context + the client context, must be unallocated + + + Allocate and initialize an AVIOContext for buffered I/O. It must be later freed with av_free(). + Memory block for input/output operations via AVIOContext. The buffer must be allocated with av_malloc() and friends. It may be freed and replaced with a new buffer by libavformat. AVIOContext.buffer holds the buffer currently in use, which must be later freed with av_free(). + The buffer size is very important for performance. For protocols with fixed blocksize it should be set to this blocksize. For others a typical size is a cache page, e.g. 4kb. + Set to 1 if the buffer should be writable, 0 otherwise. + An opaque pointer to user-specific data. 
+ A function for refilling the buffer, may be NULL. + A function for writing the buffer contents, may be NULL. The function may not change the input buffers content. + A function for seeking to specified byte position, may be NULL. + + + Return AVIO_FLAG_* access flags corresponding to the access permissions of the resource in url, or a negative value corresponding to an AVERROR code in case of failure. The returned access flags are masked by the value in flags. + + + Close the resource accessed by the AVIOContext s and free it. This function can only be used if s was opened by avio_open(). + + + Close directory. + directory read context. + + + Return the written size and a pointer to the buffer. The buffer must be freed with av_free(). Padding of AV_INPUT_BUFFER_PADDING_SIZE is added to the buffer. + IO context + pointer to a byte buffer + + + Close the resource accessed by the AVIOContext *s, free it and set the pointer pointing to it to NULL. This function can only be used if s was opened by avio_open(). + + + Iterate through names of available protocols. + A private pointer representing current protocol. It must be a pointer to NULL on first iteration and will be updated by successive calls to avio_enum_protocols. + If set to 1, iterate over output protocols, otherwise over input protocols. + + + feof() equivalent for AVIOContext. + + + Return the name of the protocol that will handle the passed URL. + + + Force flushing of buffered data. + + + Free entry allocated by avio_read_dir(). + entry to be freed. + + + Return the written size and a pointer to the buffer. The AVIOContext stream is left intact. The buffer must NOT be freed. No padding is added to the buffer. + IO context + pointer to a byte buffer + + + Read a string from pb into buf. The reading will terminate when either a NULL character was encountered, maxlen bytes have been read, or nothing more can be read from pb. 
The result is guaranteed to be NULL-terminated, it will be truncated if buf is too small. Note that the string is not interpreted or validated in any way, it might get truncated in the middle of a sequence for multi-byte encodings. + + + Read a UTF-16 string from pb and convert it to UTF-8. The reading will terminate when either a null or invalid character was encountered or maxlen bytes have been read. + + + Perform one step of the protocol handshake to accept a new client. This function must be called on a client returned by avio_accept() before using it as a read/write context. It is separate from avio_accept() because it may block. A step of the handshake is defined by places where the application may decide to change the proceedings. For example, on a protocol with a request header and a reply header, each one can constitute a step because the application may use the parameters from the request to change parameters in the reply; or each individual chunk of the request can constitute a step. If the handshake is already finished, avio_handshake() does nothing and returns 0 immediately. + the client context to perform the handshake on + + + Create and initialize a AVIOContext for accessing the resource indicated by url. + Used to return the pointer to the created AVIOContext. In case of failure the pointed to value is set to NULL. + resource to access + flags which control how the resource indicated by url is to be opened + + + Open directory for reading. + directory read context. Pointer to a NULL pointer must be passed. + directory to be listed. + A dictionary filled with protocol-private options. On return this parameter will be destroyed and replaced with a dictionary containing options that were not found. May be NULL. + + + Open a write only memory stream. + new IO context + + + Create and initialize a AVIOContext for accessing the resource indicated by url. + Used to return the pointer to the created AVIOContext. 
In case of failure the pointed to value is set to NULL. + resource to access + flags which control how the resource indicated by url is to be opened + an interrupt callback to be used at the protocols level + A dictionary filled with protocol-private options. On return this parameter will be destroyed and replaced with a dict containing options that were not found. May be NULL. + + + Pause and resume playing - only meaningful if using a network streaming protocol (e.g. MMS). + IO context from which to call the read_pause function pointer + 1 for pause, 0 for resume + + + Write a NULL-terminated string. + + + Convert an UTF-8 string to UTF-16BE and write it. + the AVIOContext + NULL-terminated UTF-8 string + + + Convert an UTF-8 string to UTF-16LE and write it. + the AVIOContext + NULL-terminated UTF-8 string + + + @{ + + + Read size bytes from AVIOContext into buf. + + + Get next directory entry. + directory read context. + next entry or NULL when no more entries. + + + Read contents of h into print buffer, up to max_size bytes, or up to EOF. + + + fseek() equivalent for AVIOContext. + + + Seek to a given timestamp relative to some component stream. Only meaningful if using a network streaming protocol (e.g. MMS.). + IO context from which to call the seek function pointers + The stream index that the timestamp is relative to. If stream_index is (-1) the timestamp should be in AV_TIME_BASE units from the beginning of the presentation. If a stream_index >= 0 is used and the protocol does not support seeking based on component streams, the call will fail. + timestamp in AVStream.time_base units or if there is no stream specified then in AV_TIME_BASE units. + Optional combination of AVSEEK_FLAG_BACKWARD, AVSEEK_FLAG_BYTE and AVSEEK_FLAG_ANY. The protocol may silently ignore AVSEEK_FLAG_BACKWARD and AVSEEK_FLAG_ANY, but AVSEEK_FLAG_BYTE will fail if used and not supported. + + + Get the filesize. 
+ + + Skip given number of bytes forward + + + Mark the written bytestream as a specific type. + the stream time the current bytestream pos corresponds to (in AV_TIME_BASE units), or AV_NOPTS_VALUE if unknown or not applicable + the kind of data written starting at the current pos + + + Delete a resource. + resource to be deleted. + + + Move or rename a resource. + url to resource to be moved + new url to resource if the operation succeeded + + + Add two rationals. + First rational + Second rational + + + Add a value to a timestamp. + Input timestamp time base + Input timestamp + Time base of `inc` + Value to be added + + + Allocate an AVAudioFifo. + sample format + number of channels + initial allocation size, in samples + + + Drain data from an AVAudioFifo. + AVAudioFifo to drain + number of samples to drain + + + Free an AVAudioFifo. + AVAudioFifo to free + + + Peek data from an AVAudioFifo. + AVAudioFifo to read from + audio data plane pointers + number of samples to peek + + + Peek data from an AVAudioFifo. + AVAudioFifo to read from + audio data plane pointers + number of samples to peek + offset from current read position + + + Read data from an AVAudioFifo. + AVAudioFifo to read from + audio data plane pointers + number of samples to read + + + Reallocate an AVAudioFifo. + AVAudioFifo to reallocate + new allocation size, in samples + + + Reset the AVAudioFifo buffer. + AVAudioFifo to reset + + + Get the current number of samples in the AVAudioFifo available for reading. + the AVAudioFifo to query + + + Get the current number of samples in the AVAudioFifo available for writing. + the AVAudioFifo to query + + + Write data to an AVAudioFifo. + AVAudioFifo to write to + audio data plane pointers + number of samples to write + + + Append a description of a channel layout to a bprint buffer. + + + Allocate an AVBuffer of the given size using av_malloc(). + + + Same as av_buffer_alloc(), except the returned buffer will be initialized to zero. 
+ + + Create an AVBuffer from an existing array. + data array + size of data in bytes + a callback for freeing this buffer's data + parameter to be got for processing or passed to free + a combination of AV_BUFFER_FLAG_* + + + Default free callback, which calls av_free() on the buffer data. This function is meant to be passed to av_buffer_create(), not called directly. + + + Returns the opaque parameter set by av_buffer_create. + + + Returns 1 if the caller may write to the data referred to by buf (which is true if and only if buf is the only reference to the underlying AVBuffer). Return 0 otherwise. A positive answer is valid until av_buffer_ref() is called on buf. + + + Create a writable reference from a given buffer reference, avoiding data copy if possible. + buffer reference to make writable. On success, buf is either left untouched, or it is unreferenced and a new writable AVBufferRef is written in its place. On failure, buf is left untouched. + + + Allocate a new AVBuffer, reusing an old buffer from the pool when available. This function may be called simultaneously from multiple threads. + + + Allocate and initialize a buffer pool. + size of each buffer in this pool + a function that will be used to allocate new buffers when the pool is empty. May be NULL, then the default allocator will be used (av_buffer_alloc()). + + + Allocate and initialize a buffer pool with a more complex allocator. + size of each buffer in this pool + arbitrary user data used by the allocator + a function that will be used to allocate new buffers when the pool is empty. + a function that will be called immediately before the pool is freed. I.e. after av_buffer_pool_uninit() is called by the caller and all the frames are returned to the pool and freed. It is intended to uninitialize the user opaque data. + + + Mark the pool as being available for freeing. It will actually be freed only once all the allocated buffers associated with the pool are released. 
Thus it is safe to call this function while some of the allocated buffers are still in use. + pointer to the pool to be freed. It will be set to NULL. + + + Reallocate a given buffer. + a buffer reference to reallocate. On success, buf will be unreferenced and a new reference with the required size will be written in its place. On failure buf will be left untouched. *buf may be NULL, then a new buffer is allocated. + required new buffer size. + + + Create a new reference to an AVBuffer. + + + Free a given reference and automatically free the buffer if there are no more references to it. + the reference to be freed. The pointer is set to NULL on return. + + + Non-inlined equivalent of av_mallocz_array(). + + + Get the channel with the given index in channel_layout. + + + Returns the name for provided chroma location or NULL if unknown. + + + Returns the name for provided color primaries or NULL if unknown. + + + Returns the name for provided color range or NULL if unknown. + + + Returns the name for provided color space or NULL if unknown. + + + Returns the name for provided color transfer or NULL if unknown. + + + Compare the remainders of two integer operands divided by a common divisor. + Divisor; must be a power of 2 + + + Compare two timestamps each in its own time base. + + + Returns the number of logical CPU cores present. + + + Convert a double precision floating point number to a rational. + `double` to convert + Maximum allowed numerator and denominator + + + Return the context name + The AVClass context + + + Copy entries from one AVDictionary struct into another. + pointer to a pointer to a AVDictionary struct. If *dst is NULL, this function will allocate a struct for you and put it in *dst + pointer to source AVDictionary struct + flags to use when setting entries in *dst + + + Get number of entries in dictionary. + dictionary + + + Free all the memory allocated for an AVDictionary struct and all keys and values. 
+ + + Get a dictionary entry with matching key. + matching key + Set to the previous matching element to find the next. If set to NULL the first matching element is returned. + a collection of AV_DICT_* flags controlling how the entry is retrieved + + + Get dictionary entries as a string. + dictionary + Pointer to buffer that will be allocated with string containg entries. Buffer must be freed by the caller when is no longer needed. + character used to separate key from value + character used to separate two pairs from each other + + + Parse the key/value pairs list and add the parsed entries to a dictionary. + a 0-terminated list of characters used to separate key from value + a 0-terminated list of characters used to separate two pairs from each other + flags to use when adding to dictionary. AV_DICT_DONT_STRDUP_KEY and AV_DICT_DONT_STRDUP_VAL are ignored since the key/value tokens will always be duplicated. + + + Set the given entry in *pm, overwriting an existing entry. + pointer to a pointer to a dictionary struct. If *pm is NULL a dictionary struct is allocated and put in *pm. + entry key to add to *pm (will either be av_strduped or added as a new key depending on flags) + entry value to add to *pm (will be av_strduped or added as a new key depending on flags). Passing a NULL value will cause an existing entry to be deleted. + + + Convenience wrapper for av_dict_set that converts the value to a string and stores it. + + + Divide one rational by another. + First rational + Second rational + + + Add the pointer to an element to a dynamic array. + Pointer to the array to grow + Pointer to the number of elements in the array + Element to add + + + Add an element to a dynamic array. + + + Add an element of size `elem_size` to a dynamic array. + Pointer to the array to grow + Pointer to the number of elements in the array + Size in bytes of an element in the array + Pointer to the data of the element to add. 
If `NULL`, the space of the newly added element is allocated but left uninitialized. + + + Allocate a buffer, reusing the given one if large enough. + Pointer to pointer to an already allocated buffer. `*ptr` will be overwritten with pointer to new buffer on success or `NULL` on failure + Pointer to current size of buffer `*ptr`. `*size` is changed to `min_size` in case of success or 0 in case of failure + New size of buffer `*ptr` + + + Allocate and clear a buffer, reusing the given one if large enough. + Pointer to pointer to an already allocated buffer. `*ptr` will be overwritten with pointer to new buffer on success or `NULL` on failure + Pointer to current size of buffer `*ptr`. `*size` is changed to `min_size` in case of success or 0 in case of failure + New size of buffer `*ptr` + + + Reallocate the given buffer if it is not large enough, otherwise do nothing. + Already allocated buffer, or `NULL` + Pointer to current size of buffer `ptr`. `*size` is changed to `min_size` in case of success or 0 in case of failure + New size of buffer `ptr` + + + Initialize an AVFifoBuffer. + of FIFO + + + Initialize an AVFifoBuffer. + number of elements + size of the single element + + + Read and discard the specified amount of data from an AVFifoBuffer. + AVFifoBuffer to read from + amount of data to read in bytes + + + Free an AVFifoBuffer. + AVFifoBuffer to free + + + Free an AVFifoBuffer and reset pointer to NULL. + AVFifoBuffer to free + + + Feed data from an AVFifoBuffer to a user-supplied callback. Similar as av_fifo_gereric_read but without discarding data. + AVFifoBuffer to read from + data destination + number of bytes to read + generic read function + + + Feed data at specific position from an AVFifoBuffer to a user-supplied callback. Similar as av_fifo_gereric_read but without discarding data. 
+ AVFifoBuffer to read from + data destination + offset from current read position + number of bytes to read + generic read function + + + Feed data from an AVFifoBuffer to a user-supplied callback. + AVFifoBuffer to read from + data destination + number of bytes to read + generic read function + + + Feed data from a user-supplied callback to an AVFifoBuffer. + AVFifoBuffer to write to + data source; non-const since it may be used as a modifiable context by the function defined in func + number of bytes to write + generic write function; the first parameter is src, the second is dest_buf, the third is dest_buf_size. func must return the number of bytes written to dest_buf, or < = 0 to indicate no more data available to write. If func is NULL, src is interpreted as a simple byte array for source data. + + + Enlarge an AVFifoBuffer. In case of reallocation failure, the old FIFO is kept unchanged. The new fifo size may be larger than the requested size. + AVFifoBuffer to resize + the amount of space in bytes to allocate in addition to av_fifo_size() + + + Resize an AVFifoBuffer. In case of reallocation failure, the old FIFO is kept unchanged. + AVFifoBuffer to resize + new AVFifoBuffer size in bytes + + + Reset the AVFifoBuffer to the state right after av_fifo_alloc, in particular it is emptied. + AVFifoBuffer to reset + + + Return the amount of data in bytes in the AVFifoBuffer, that is the amount of data you can read from it. + AVFifoBuffer to read from + + + Return the amount of space in bytes in the AVFifoBuffer, that is the amount of data you can write into it. + AVFifoBuffer to write into + + + Compute what kind of losses will occur when converting from one specific pixel format to another. When converting from one pixel format to another, information loss may occur. For example, when converting from RGB24 to GRAY, the color information will be lost. Similarly, other losses occur when converting from some formats to other formats. 
These losses can involve loss of chroma, but also loss of resolution, loss of color depth, loss due to the color space conversion, loss of the alpha bits or loss due to color quantization. av_get_fix_fmt_loss() informs you about the various types of losses which will occur when converting from one pixel format to another. + source pixel format + Whether the source pixel format alpha channel is used. + + + Find the value in a list of rationals nearest a given reference rational. + Reference rational + Array of rationals terminated by `{0, 0}` + + + Open a file using a UTF-8 filename. The API of this function matches POSIX fopen(), errors are returned through errno. + + + Disables cpu detection and forces the specified flags. -1 is a special case that disables forcing of specific flags. + + + Fill the provided buffer with a string containing a FourCC (four-character code) representation. + a buffer with size in bytes of at least AV_FOURCC_MAX_STRING_SIZE + the fourcc to represent + + + Allocate an AVFrame and set its fields to default values. The resulting struct must be freed using av_frame_free(). + + + Create a new frame that references the same data as src. + + + Copy the frame data from src to dst. + + + Copy only "metadata" fields from src to dst. + + + Free the frame and any dynamically allocated objects in it, e.g. extended_data. If the frame is reference counted, it will be unreferenced first. + frame to be freed. The pointer will be set to NULL. + + + Accessors for some AVFrame fields. These used to be provided for ABI compatibility, and do not need to be used anymore. + + + Allocate new buffer(s) for audio or video data. + frame in which to store the new buffers. + required buffer size alignment + + + Get the buffer reference a given data plane is stored in. + index of the data plane of interest in frame->extended_data. + + + Returns a pointer to the side data of a given type on success, NULL if there is no side data with such type in this frame. 
+ + + Check if the frame data is writable. + + + Ensure that the frame data is writable, avoiding data copy if possible. + + + Move everything contained in src to dst and reset src. + + + Add a new side data to a frame. + a frame to which the side data should be added + type of the added side data + size of the side data + + + Set up a new reference to the data described by the source frame. + + + If side data of the supplied type exists in the frame, free it and remove it from the frame. + + + Returns a string identifying the side data type + + + Unreference all the buffers referenced by frame and reset the frame fields. + + + Free a memory block which has been allocated with a function of av_malloc() or av_realloc() family. + Pointer to the memory block which should be freed. + + + Free a memory block which has been allocated with a function of av_malloc() or av_realloc() family, and set the pointer pointing to it to `NULL`. + Pointer to the pointer to the memory block which should be freed + + + Compute the greatest common divisor of two integer operands. + + + Return the planar<->packed alternative form of the given sample format, or AV_SAMPLE_FMT_NONE on error. If the passed sample_fmt is already in the requested planar/packed format, the format returned is the same as the input. + + + Return the number of bits per pixel used by the pixel format described by pixdesc. Note that this is not the same as the number of bits per sample. + + + Return number of bytes per sample. + the sample format + + + Get the description of a given channel. + a channel layout with a single channel + + + Return a channel layout id that matches name, or 0 if no match is found. + + + Get the index of a channel in channel_layout. + a channel layout describing exactly one channel which must be present in channel_layout. + + + Return the number of channels in the channel layout. + + + Return a description of a channel layout. If nb_channels is <= 0, it is guessed from the channel_layout. 
+ put here the string containing the channel layout + size in bytes of the buffer + + + Get the name of a given channel. + + + Get the name of a colorspace. + + + Return the flags which specify extensions supported by the CPU. The returned value is affected by av_force_cpu_flags() if that was used before. So av_get_cpu_flags() can easily be used in an application to detect the enabled cpu flags. + + + Return default channel layout for a given number of channels. + + + Return a channel layout and the number of channels based on the specified name. + channel layout specification string + parsed channel layout (0 if unknown) + number of channels + + + Return a string describing the media_type enum, NULL if media_type is unknown. + + + Get the packed alternative form of the given sample format. + + + Return the number of bits per pixel for the pixel format described by pixdesc, including any padding or unused bits. + + + Return a single letter to describe the given picture type pict_type. + the picture type + + + Return the pixel format corresponding to name. + + + Compute what kind of losses will occur when converting from one specific pixel format to another. When converting from one pixel format to another, information loss may occur. For example, when converting from RGB24 to GRAY, the color information will be lost. Similarly, other losses occur when converting from some formats to other formats. These losses can involve loss of chroma, but also loss of resolution, loss of color depth, loss due to the color space conversion, loss of the alpha bits or loss due to color quantization. av_get_fix_fmt_loss() informs you about the various types of losses which will occur when converting from one pixel format to another. + destination pixel format + source pixel format + Whether the source pixel format alpha channel is used. + + + Return the short name for a pixel format, NULL in case pix_fmt is unknown. 
+ + + Print in buf the string corresponding to the pixel format with number pix_fmt, or a header if pix_fmt is negative. + the buffer where to write the string + the size of buf + the number of the pixel format to print the corresponding info string, or a negative value to print the corresponding header. + + + Get the planar alternative form of the given sample format. + + + Return a sample format corresponding to name, or AV_SAMPLE_FMT_NONE on error. + + + Return the name of sample_fmt, or NULL if sample_fmt is not recognized. + + + Generate a string corresponding to the sample format with sample_fmt, or a header if sample_fmt is negative. + the buffer where to write the string + the size of buf + the number of the sample format to print the corresponding info string, or a negative value to print the corresponding header. + + + Get the value and name of a standard channel layout. + index in an internal list, starting at 0 + channel layout mask + name of the layout + + + Return the fractional representation of the internal time base. + + + Allocate an AVHWDeviceContext for a given hardware type. + the type of the hardware device to allocate. + + + Open a device of the specified type and create an AVHWDeviceContext for it. + On success, a reference to the newly-created device context will be written here. The reference is owned by the caller and must be released with av_buffer_unref() when no longer needed. On failure, NULL will be written to this pointer. + The type of the device to create. + A type-specific string identifying the device to open. + A dictionary of additional (type-specific) options to use in opening the device. The dictionary remains owned by the caller. + currently unused + + + Finalize the device context before use. This function must be called after the context is filled with all the required information and before it is used in any way. 
+ a reference to the AVHWDeviceContext + + + Get the constraints on HW frames given a device and the HW-specific configuration to be used with that device. If no HW-specific configuration is provided, returns the maximum possible capabilities of the device. + a filled HW-specific configuration structure, or NULL to return the maximum possible capabilities of the device. + + + Allocate a HW-specific configuration structure for a given HW device. After use, the user must free all members as required by the specific hardware structure being used, then free the structure itself with av_free(). + a reference to the associated AVHWDeviceContext. + + + Free an AVHWFrameConstraints structure. + The (filled or unfilled) AVHWFrameConstraints structure. + + + Allocate an AVHWFramesContext tied to a given device context. + a reference to a AVHWDeviceContext. This function will make a new reference for internal use, the one passed to the function remains owned by the caller. + + + Create and initialise an AVHWFramesContext as a mapping of another existing AVHWFramesContext on a different device. + On success, a reference to the newly created AVHWFramesContext. + A reference to the device to create the new AVHWFramesContext on. + A reference to an existing AVHWFramesContext which will be mapped to the derived context. + Currently unused; should be set to zero. + + + Finalize the context before use. This function must be called after the context is filled with all the required information and before it is attached to any frames. + a reference to the AVHWFramesContext + + + Allocate a new frame attached to the given AVHWFramesContext. + a reference to an AVHWFramesContext + an empty (freshly allocated or unreffed) frame to be filled with newly allocated buffers. + currently unused, should be set to zero + + + Map a hardware frame. + Destination frame, to contain the mapping. + Source frame, to be mapped. + Some combination of AV_HWFRAME_MAP_* flags. 
+ + + Copy data to or from a hw surface. At least one of dst/src must have an AVHWFramesContext attached. + the destination frame. dst is not touched on failure. + the source frame. + currently unused, should be set to zero + + + Get a list of possible source or target formats usable in av_hwframe_transfer_data(). + the frame context to obtain the information for + the direction of the transfer + the pointer to the output format list will be written here. The list is terminated with AV_PIX_FMT_NONE and must be freed by the caller when no longer needed using av_free(). If this function returns successfully, the format list will have at least one item (not counting the terminator). On failure, the contents of this pointer are unspecified. + currently unused, should be set to zero + + + Allocate an image with size w and h and pixel format pix_fmt, and fill pointers and linesizes accordingly. The allocated image buffer has to be freed by using av_freep(&pointers[0]). + the value to use for buffer size alignment + + + Check if the given sample aspect ratio of an image is valid. + width of the image + height of the image + sample aspect ratio of the image + + + Check if the given dimension of an image is valid, meaning that all bytes of the image can be addressed with a signed int. + the width of the picture + the height of the picture + the offset to sum to the log level for logging with log_ctx + the parent logging context, it may be NULL + + + Check if the given dimension of an image is valid, meaning that all bytes of a plane of an image with the specified pix_fmt can be addressed with a signed int. + the width of the picture + the height of the picture + the maximum number of pixels the user wants to accept + the pixel format, can be AV_PIX_FMT_NONE if unknown. + the offset to sum to the log level for logging with log_ctx + the parent logging context, it may be NULL + + + Copy image in src_data to dst_data. 
+ linesizes for the image in dst_data + linesizes for the image in src_data + + + Copy image plane from src to dst. That is, copy "height" number of lines of "bytewidth" bytes each. The first byte of each successive line is separated by *_linesize bytes. + linesize for the image plane in dst + linesize for the image plane in src + + + Copy image data from an image into a buffer. + a buffer into which picture data will be copied + the size in bytes of dst + pointers containing the source image data + the pixel format of the source image + the width of the source image in pixels + the height of the source image in pixels + the assumed linesize alignment for dst + + + Copy image data located in uncacheable (e.g. GPU mapped) memory. Where available, this function will use special functionality for reading from such memory, which may result in greatly improved performance compared to plain av_image_copy(). + + + Setup the data pointers and linesizes based on the specified image parameters and the provided array. + data pointers to be filled in + buffer which will contain or contains the actual image data, can be NULL + the pixel format of the image + the width of the image in pixels + the height of the image in pixels + the value used in src for linesize alignment + + + Fill plane linesizes for an image with pixel format pix_fmt and width width. + array to be filled with the linesize for each plane + + + Compute the max pixel step for each plane of an image with a format described by pixdesc. + an array which is filled with the max pixel step for each plane. Since a plane may contain different pixel components, the computed max_pixsteps[plane] is relative to the component in the plane with the max pixel step. + an array which is filled with the component for each plane which has the max pixel step. May be NULL. + + + Fill plane data pointers for an image with pixel format pix_fmt and height height. 
+ pointers array to be filled with the pointer for each image plane + the pointer to a buffer which will contain the image + the array containing the linesize for each plane, should be filled by av_image_fill_linesizes() + + + Return the size in bytes of the amount of data required to store an image with the given parameters. + the assumed linesize alignment + + + Compute the size of an image line with format pix_fmt and width width for the plane plane. + + + Compute the length of an integer list. + size in bytes of each list element (only 1, 2, 4 or 8) + pointer to the list + list terminator (usually 0 or -1) + + + Send the specified message to the log if the level is less than or equal to the current av_log_level. By default, all logging messages are sent to stderr. This behavior can be altered by setting a different logging callback function. + A pointer to an arbitrary struct of which the first field is a pointer to an AVClass struct or NULL if general log. + The importance level of the message expressed using a + The format string (printf-compatible) that specifies how subsequent arguments are converted to output. + + + Default logging callback + A pointer to an arbitrary struct of which the first field is a pointer to an AVClass struct. + The importance level of the message expressed using a + The format string (printf-compatible) that specifies how subsequent arguments are converted to output. + The arguments referenced by the format string. + + + Format a line of log the same way as the default callback. + buffer to receive the formatted line + size of the buffer + used to store whether the prefix must be printed; must point to a persistent integer initially set to 1 + + + Format a line of log the same way as the default callback. 
+ buffer to receive the formatted line; may be NULL if line_size is 0 + size of the buffer; at most line_size-1 characters will be written to the buffer, plus one null terminator + used to store whether the prefix must be printed; must point to a persistent integer initially set to 1 + + + Get the current log level + + + Set the logging callback + A logging function with a compatible signature. + + + Set the log level + Logging level + + + Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU). + Size in bytes for the memory block to be allocated + + + Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block. + Size in bytes for the memory block to be allocated + + + Set the maximum size that may be allocated in one block. + Value to be set as the new maximum size + + + Overlapping memcpy() implementation. + Destination buffer + Number of bytes back to start copying (i.e. the initial size of the overlapping window); must be > 0 + Number of bytes to copy; must be >= 0 + + + Duplicate a buffer with av_malloc(). + Buffer to be duplicated + Size in bytes of the buffer copied + + + Multiply two rationals. + First rational + Second rational + + + Find which of the two rationals is closer to another rational. + Rational to be compared against + + + Iterate over potential AVOptions-enabled children of parent. + result of a previous call to this function or NULL + + + Iterate over AVOptions-enabled children of obj. + result of a previous call to this function or NULL + + + Copy options from src object into dest object. + Object to copy from + Object to copy into + + + @{ This group of functions can be used to evaluate option strings and get numbers out of them. They do the same thing as av_opt_set(), except the result is written into the caller-supplied pointer. + a struct whose first element is a pointer to AVClass. 
+ an option for which the string is to be evaluated. + string to be evaluated. + + + Look for an option in an object. Consider only options which have all the specified flags set. + A pointer to a struct whose first element is a pointer to an AVClass. Alternatively a double pointer to an AVClass, if AV_OPT_SEARCH_FAKE_OBJ search flag is set. + The name of the option to look for. + When searching for named constants, name of the unit it belongs to. + Find only options with all the specified flags set (AV_OPT_FLAG). + A combination of AV_OPT_SEARCH_*. + + + Look for an option in an object. Consider only options which have all the specified flags set. + A pointer to a struct whose first element is a pointer to an AVClass. Alternatively a double pointer to an AVClass, if AV_OPT_SEARCH_FAKE_OBJ search flag is set. + The name of the option to look for. + When searching for named constants, name of the unit it belongs to. + Find only options with all the specified flags set (AV_OPT_FLAG). + A combination of AV_OPT_SEARCH_*. + if non-NULL, an object to which the option belongs will be written here. It may be different from obj if AV_OPT_SEARCH_CHILDREN is present in search_flags. This parameter is ignored if search_flags contain AV_OPT_SEARCH_FAKE_OBJ. + + + Check whether a particular flag is set in a flags field. + the name of the flag field option + the name of the flag to check + + + Free all allocated objects in obj. + + + Free an AVOptionRanges struct and set it to NULL. + + + @{ Those functions get a value of the option with the given name from an object. + a struct whose first element is a pointer to an AVClass. + name of the option to get. + flags passed to av_opt_find2. I.e. if AV_OPT_SEARCH_CHILDREN is passed here, then the option may be found in a child of obj. 
+ value of the option will be written here + + + The returned dictionary is a copy of the actual value and must be freed with av_dict_free() by the caller + + + Extract a key-value pair from the beginning of a string. + pointer to the options string, will be updated to point to the rest of the string (one of the pairs_sep or the final NUL) + a 0-terminated list of characters used to separate key from value, for example '=' + a 0-terminated list of characters used to separate two pairs from each other, for example ':' or ',' + flags; see the AV_OPT_FLAG_* values below + parsed key; must be freed using av_free() + parsed value; must be freed using av_free() + + + Check if given option is set to its default value. + AVClass object to check option on + option to be checked + + + Check if given option is set to its default value. + AVClass object to check option on + option name + combination of AV_OPT_SEARCH_* + + + Iterate over all AVOptions belonging to obj. + an AVOptions-enabled struct or a double pointer to an AVClass describing it. + result of the previous call to av_opt_next() on this object or NULL + + + @} + + + Get a list of allowed ranges for the given option. + is a bitmask of flags, undefined flags should not be set and should be ignored AV_OPT_SEARCH_FAKE_OBJ indicates that the obj is a double pointer to a AVClass instead of a full instance AV_OPT_MULTI_COMPONENT_RANGE indicates that function may return more than one component, + + + Get a default list of allowed ranges for the given option. + is a bitmask of flags, undefined flags should not be set and should be ignored AV_OPT_SEARCH_FAKE_OBJ indicates that the obj is a double pointer to a AVClass instead of a full instance AV_OPT_MULTI_COMPONENT_RANGE indicates that function may return more than one component, + + + Serialize object's options. 
+ AVClass object to serialize + serialize options with all the specified flags set (AV_OPT_FLAG) + combination of AV_OPT_SERIALIZE_* flags + Pointer to buffer that will be allocated with string containg serialized options. Buffer must be freed by the caller when is no longer needed. + character used to separate key from value + character used to separate two pairs from each other + + + @{ Those functions set the field of obj with the given name to value. + A struct whose first element is a pointer to an AVClass. + the name of the field to set + The value to set. In case of av_opt_set() if the field is not of a string type, then the given string is parsed. SI postfixes and some named scalars are supported. If the field is of a numeric type, it has to be a numeric or named scalar. Behavior with more than one scalar and +- infix operators is undefined. If the field is of a flags type, it has to be a sequence of numeric scalars or named flags separated by '+' or '-'. Prefixing a flag with '+' causes it to be set without affecting the other flags; similarly, '-' unsets a flag. + flags passed to av_opt_find2. I.e. if AV_OPT_SEARCH_CHILDREN is passed here, then the option may be set on a child of obj. + + + Set the values of all AVOption fields to their default values. + an AVOption-enabled struct (its first member must be a pointer to AVClass) + + + Set the values of all AVOption fields to their default values. Only these AVOption fields for which (opt->flags & mask) == flags will have their default applied to s. + an AVOption-enabled struct (its first member must be a pointer to AVClass) + combination of AV_OPT_FLAG_* + combination of AV_OPT_FLAG_* + + + Set all the options from a given dictionary on an object. + a struct whose first element is a pointer to AVClass + options to process. This dictionary will be freed and replaced by a new one containing all options not found in obj. Of course this new dictionary needs to be freed by caller with av_dict_free(). 
+ + + Set all the options from a given dictionary on an object. + a struct whose first element is a pointer to AVClass + options to process. This dictionary will be freed and replaced by a new one containing all options not found in obj. Of course this new dictionary needs to be freed by caller with av_dict_free(). + A combination of AV_OPT_SEARCH_*. + + + Parse the key-value pairs list in opts. For each key=value pair found, set the value of the corresponding option in ctx. + the AVClass object to set options on + the options string, key-value pairs separated by a delimiter + a NULL-terminated array of options names for shorthand notation: if the first field in opts has no key part, the key is taken from the first element of shorthand; then again for the second, etc., until either opts is finished, shorthand is finished or a named option is found; after that, all options must be named + a 0-terminated list of characters used to separate key from value, for example '=' + a 0-terminated list of characters used to separate two pairs from each other, for example ':' or ',' + + + Show the obj options. + log context to use for showing the options + requested flags for the options to show. Show only the options for which it is opt->flags & req_flags. + rejected flags for the options to show. Show only the options for which it is !(opt->flags & req_flags). + + + Parse CPU caps from a string and update the given AV_CPU_* flags based on that. + + + Parse CPU flags from a string. + + + Returns number of planes in pix_fmt, a negative AVERROR if pix_fmt is not a valid pixel format. + + + Returns a pixel format descriptor for provided pixel format or NULL if this pixel format is unknown. + + + Returns an AVPixelFormat id described by desc, or AV_PIX_FMT_NONE if desc is not a valid pointer to a pixel format descriptor. + + + Iterate over all pixel format descriptors known to libavutil. + previous descriptor. NULL to get the first descriptor. 
+ + + Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor. + the pixel format + store log2_chroma_w (horizontal/width shift) + store log2_chroma_h (vertical/height shift) + + + Utility function to swap the endianness of a pixel format. + the pixel format + + + Convert an AVRational to a IEEE 32-bit `float` expressed in fixed-point format. + Rational to be converted + + + Read a line from an image, and write the values of the pixel format component c to dst. + the array containing the pointers to the planes of the image + the array containing the linesizes of the image + the pixel format descriptor for the image + the horizontal coordinate of the first pixel to read + the vertical coordinate of the first pixel to read + the width of the line to read, that is the number of values to write to dst + if not zero and the format is a paletted format writes the values corresponding to the palette component c in data[1] to dst, rather than the palette indexes in data[0]. The behavior is undefined if the format is not paletted. + + + Allocate, reallocate, or free a block of memory. + Pointer to a memory block already allocated with av_realloc() or `NULL` + Size in bytes of the memory block to be allocated or reallocated + + + Allocate, reallocate, or free an array. + Pointer to a memory block already allocated with av_realloc() or `NULL` + Number of elements in the array + Size of the single element of the array + + + Allocate, reallocate, or free a block of memory. + + + Allocate, reallocate, or free a block of memory through a pointer to a pointer. + Pointer to a pointer to a memory block already allocated with av_realloc(), or a pointer to `NULL`. The pointer is updated on success, or freed on failure. + Size in bytes for the memory block to be allocated or reallocated + + + Allocate, reallocate, or free an array through a pointer to a pointer. 
+ Pointer to a pointer to a memory block already allocated with av_realloc(), or a pointer to `NULL`. The pointer is updated on success, or freed on failure. + Number of elements + Size of the single element + + + Reduce a fraction. + Destination numerator + Destination denominator + Source numerator + Source denominator + Maximum allowed values for `dst_num` & `dst_den` + + + Rescale a 64-bit integer with rounding to nearest. + + + Rescale a timestamp while preserving known durations. + Input time base + Input timestamp + Duration time base; typically this is finer-grained (greater) than `in_tb` and `out_tb` + Duration till the next call to this function (i.e. duration of the current packet/frame) + Pointer to a timestamp expressed in terms of `fs_tb`, acting as a state variable + Output timebase + + + Rescale a 64-bit integer by 2 rational numbers. + + + Rescale a 64-bit integer by 2 rational numbers with specified rounding. + + + Rescale a 64-bit integer with specified rounding. + + + Check if the sample format is planar. + the sample format to inspect + + + Allocate a samples buffer for nb_samples samples, and fill data pointers and linesize accordingly. The allocated samples buffer can be freed by using av_freep(&audio_data[0]) Allocated data will be initialized to silence. + array to be filled with the pointer for each channel + aligned size for audio buffer(s), may be NULL + number of audio channels + number of samples per channel + buffer size alignment (0 = default, 1 = no alignment) + + + Allocate a data pointers array, samples buffer for nb_samples samples, and fill data pointers and linesize accordingly. + + + Copy samples from src to dst. 
+ destination array of pointers to data planes + source array of pointers to data planes + offset in samples at which the data will be written to dst + offset in samples at which the data will be read from src + number of samples to be copied + number of audio channels + audio sample format + + + Fill plane data pointers and linesize for samples with sample format sample_fmt. + array to be filled with the pointer for each channel + calculated linesize, may be NULL + the pointer to a buffer containing the samples + the number of channels + the number of samples in a single channel + the sample format + buffer size alignment (0 = default, 1 = no alignment) + + + Get the required buffer size for the given audio parameters. + calculated linesize, may be NULL + the number of channels + the number of samples in a single channel + the sample format + buffer size alignment (0 = default, 1 = no alignment) + + + Fill an audio buffer with silence. + array of pointers to data planes + offset in samples at which to start filling + number of samples to fill + number of audio channels + audio sample format + + + Set a mask on flags returned by av_get_cpu_flags(). This function is mainly useful for testing. Please use av_force_cpu_flags() and av_get_cpu_flags() instead which are more flexible + + + Parse the key/value pairs list in opts. For each key/value pair found, stores the value in the field in ctx that is named like the key. ctx must be an AVClass context, storing is done using AVOptions. + options string to parse, may be NULL + a 0-terminated list of characters used to separate key from value + a 0-terminated list of characters used to separate two pairs from each other + + + Duplicate a string. + String to be duplicated + + + Put a description of the AVERROR code errnum in errbuf. In case of failure the global variable errno is set to indicate the error. Even in case of failure av_strerror() will print a generic error message indicating the errnum provided to errbuf. 
+ error code to describe + buffer to which description is written + the size in bytes of errbuf + + + Duplicate a substring of a string. + String to be duplicated + Maximum length of the resulting string (not counting the terminating byte) + + + Subtract one rational from another. + First rational + Second rational + + + Adjust frame number for NTSC drop frame time code. + frame number to adjust + frame per second, 30 or 60 + + + Check if the timecode feature is available for the given frame rate + + + Convert frame number to SMPTE 12M binary representation. + timecode data correctly initialized + frame number + + + Init a timecode struct with the passed parameters. + pointer to an allocated AVTimecode + frame rate in rational form + miscellaneous flags such as drop frame, +24 hours, ... (see AVTimecodeFlag) + the first frame number + a pointer to an arbitrary struct of which the first field is a pointer to an AVClass struct (used for av_log) + + + Parse timecode representation (hh:mm:ss[:;.]ff). + pointer to an allocated AVTimecode + frame rate in rational form + timecode string which will determine the frame start + a pointer to an arbitrary struct of which the first field is a pointer to an AVClass struct (used for av_log). + + + Get the timecode string from the 25-bit timecode format (MPEG GOP format). + destination buffer, must be at least AV_TIMECODE_STR_SIZE long + the 25-bits timecode + + + Get the timecode string from the SMPTE timecode format. + destination buffer, must be at least AV_TIMECODE_STR_SIZE long + the 32-bit SMPTE timecode + prevent the use of a drop flag when it is known the DF bit is arbitrary + + + Load timecode string in buf. + timecode data correctly initialized + destination buffer, must be at least AV_TIMECODE_STR_SIZE long + frame number + + + Return an informative version string. This usually is the actual release version number or a git commit description. This string has no fixed format and can change any time. 
It should never be parsed by code. + + + Send the specified message to the log if the level is less than or equal to the current av_log_level. By default, all logging messages are sent to stderr. This behavior can be altered by setting a different logging callback function. + A pointer to an arbitrary struct of which the first field is a pointer to an AVClass struct. + The importance level of the message expressed using a + The format string (printf-compatible) that specifies how subsequent arguments are converted to output. + The arguments referenced by the format string. + + + Write the values from src to the pixel format component c of an image line. + array containing the values to write + the array containing the pointers to the planes of the image to write into. It is supposed to be zeroed. + the array containing the linesizes of the image + the pixel format descriptor for the image + the horizontal coordinate of the first pixel to write + the vertical coordinate of the first pixel to write + the width of the line to write, that is the number of values to write to the image line + + + Return the libavutil build-time configuration. + + + Return the libavutil license. + + + Return the LIBAVUTIL_VERSION_INT constant. + + + Return the libpostproc build-time configuration. + + + Return the libpostproc license. + + + Return the LIBPOSTPROC_VERSION_INT constant. + + + Return a pp_mode or NULL if an error occurred. + the string after "-pp" on the command line + a number from 0 to PP_QUALITY_MAX + + + Allocate SwrContext. + + + Allocate SwrContext if needed and set/reset common parameters. + existing Swr context if available, or NULL if not + output channel layout (AV_CH_LAYOUT_*) + output sample format (AV_SAMPLE_FMT_*). + output sample rate (frequency in Hz) + input channel layout (AV_CH_LAYOUT_*) + input sample format (AV_SAMPLE_FMT_*). 
+ input sample rate (frequency in Hz) + logging level offset + parent logging context, can be NULL + + + Generate a channel mixing matrix. + input channel layout + output channel layout + mix level for the center channel + mix level for the surround channel(s) + mix level for the low-frequency effects channel + if 1.0, coefficients will be normalized to prevent overflow. if INT_MAX, coefficients will not be normalized. + mixing coefficients; matrix[i + stride * o] is the weight of input channel i in output channel o. + distance between adjacent input channels in the matrix array + matrixed stereo downmix mode (e.g. dplii) + parent logging context, can be NULL + + + Closes the context so that swr_is_initialized() returns 0. + Swr context to be closed + + + Configure or reconfigure the SwrContext using the information provided by the AVFrames. + audio resample context + + + Convert audio. + allocated Swr context, with parameters set + output buffers, only the first one need be set in case of packed audio + amount of space available for output in samples per channel + input buffers, only the first one need to be set in case of packed audio + number of input samples available in one channel + + + Convert the samples in the input AVFrame and write them to the output AVFrame. + audio resample context + output AVFrame + input AVFrame + + + Drops the specified number of output samples. + allocated Swr context + number of samples to be dropped + + + Free the given SwrContext and set the pointer to NULL. + a pointer to a pointer to Swr context + + + Get the AVClass for SwrContext. It can be used in combination with AV_OPT_SEARCH_FAKE_OBJ for examining options. + + + Gets the delay the next input sample will experience relative to the next output sample. + swr context + timebase in which the returned delay will be: + + + Find an upper bound on the number of samples that the next swr_convert call will output, if called with in_samples of input samples. 
This depends on the internal state, and anything changing the internal state (like further swr_convert() calls) will may change the number of samples swr_get_out_samples() returns for the same number of input samples. + number of input samples. + + + Initialize context after user parameters have been set. + Swr context to initialize + + + Injects the specified number of silence samples. + allocated Swr context + number of samples to be dropped + + + Check whether an swr context has been initialized or not. + Swr context to check + + + Convert the next timestamp from input to output timestamps are in 1/(in_sample_rate * out_sample_rate) units. + + + Set a customized input channel mapping. + allocated Swr context, not yet initialized + customized input channel mapping (array of channel indexes, -1 for a muted channel) + + + Activate resampling compensation ("soft" compensation). This function is internally called when needed in swr_next_pts(). + allocated Swr context. If it is not initialized, or SWR_FLAG_RESAMPLE is not set, swr_init() is called with the flag set. + delta in PTS per sample + number of samples to compensate for + + + Set a customized remix matrix. + allocated Swr context, not yet initialized + remix coefficients; matrix[i + stride * o] is the weight of input channel i in output channel o + offset between lines of the matrix + + + Return the swr build-time configuration. + + + Return the swr license. + + + Return the + + + Allocate an empty SwsContext. This must be filled and passed to sws_init_context(). For filling see AVOptions, options.c and sws_setColorspaceDetails(). + + + Allocate and return an uninitialized vector with length coefficients. + + + Convert an 8-bit paletted frame into a frame with a color depth of 24 bits. 
+ source frame buffer + destination frame buffer + number of pixels to convert + array with [256] entries, which must match color arrangement (RGB or BGR) of src + + + Convert an 8-bit paletted frame into a frame with a color depth of 32 bits. + source frame buffer + destination frame buffer + number of pixels to convert + array with [256] entries, which must match color arrangement (RGB or BGR) of src + + + Free the swscaler context swsContext. If swsContext is NULL, then does nothing. + + + Get the AVClass for swsContext. It can be used in combination with AV_OPT_SEARCH_FAKE_OBJ for examining options. + + + Check if context can be reused, otherwise reallocate a new one. + + + Return a pointer to yuv<->rgb coefficients for the given colorspace suitable for sws_setColorspaceDetails(). + One of the SWS_CS_* macros. If invalid, SWS_CS_DEFAULT is used. + + + Returns -1 if not supported + + + Allocate and return an SwsContext. You need it to perform scaling/conversion operations using sws_scale(). + the width of the source image + the height of the source image + the source image format + the width of the destination image + the height of the destination image + the destination image format + specify which algorithm and options to use for rescaling + extra parameters to tune the used scaler For SWS_BICUBIC param[0] and [1] tune the shape of the basis function, param[0] tunes f(1) and param[1] f´(1) For SWS_GAUSS param[0] tunes the exponent and thus cutoff frequency For SWS_LANCZOS param[0] tunes the width of the window function + + + Return a normalized Gaussian curve used to filter stuff quality = 3 is high quality, lower is lower quality. + + + Initialize the swscaler context sws_context. + + + Returns a positive value if an endianness conversion for pix_fmt is supported, 0 otherwise. + the pixel format + + + Return a positive value if pix_fmt is a supported input format, 0 otherwise. + + + Return a positive value if pix_fmt is a supported output format, 0 otherwise. 
+ + + Scale all the coefficients of a so that their sum equals height. + + + Scale the image slice in srcSlice and put the resulting scaled slice in the image in dst. A slice is a sequence of consecutive rows in an image. + the scaling context previously created with sws_getContext() + the array containing the pointers to the planes of the source slice + the array containing the strides for each plane of the source image + the position in the source image of the slice to process, that is the number (counted starting from zero) in the image of the first row of the slice + the height of the source slice, that is the number of rows in the slice + the array containing the pointers to the planes of the destination image + the array containing the strides for each plane of the destination image + + + Scale all the coefficients of a by the scalar value. + + + Returns -1 if not supported + the yuv2rgb coefficients describing the input yuv space, normally ff_yuv2rgb_coeffs[x] + flag indicating the while-black range of the input (1=jpeg / 0=mpeg) + the yuv2rgb coefficients describing the output yuv space, normally ff_yuv2rgb_coeffs[x] + flag indicating the while-black range of the output (1=jpeg / 0=mpeg) + 16.16 fixed point brightness correction + 16.16 fixed point contrast correction + 16.16 fixed point saturation correction + + + Return the libswscale build-time configuration. + + + Return the libswscale license. + + + Color conversion and scaling library. + + + Rational number (pair of numerator and denominator). + + + Numerator + + + Denominator + + + Describe the class of an AVClass context structure. That is an arbitrary struct of which the first field is a pointer to an AVClass struct (e.g. AVCodecContext, AVFormatContext etc.). + + + The name of the class; usually it is the same name as the context structure type to which the AVClass is associated. + + + A pointer to a function which returns the name of a context instance ctx associated with the class. 
+ + + a pointer to the first option specified in the class if any or NULL + + + LIBAVUTIL_VERSION with which this structure was created. This is used to allow fields to be added without requiring major version bumps everywhere. + + + Offset in the structure where log_level_offset is stored. 0 means there is no such variable + + + Offset in the structure where a pointer to the parent context for logging is stored. For example a decoder could pass its AVCodecContext to eval as such a parent context, which an av_log() implementation could then leverage to display the parent context. The offset can be NULL. + + + Return next AVOptions-enabled child or NULL + + + Return an AVClass corresponding to the next potential AVOptions-enabled child. + + + Category used for visualization (like color) This is only set if the category is equal for all objects using this class. available since version (51 << 16 | 56 << 8 | 100) + + + Callback to return the category. available since version (51 << 16 | 59 << 8 | 100) + + + Callback to return the supported/allowed ranges. available since version (52.12) + + + AVOption + + + short English help text + + + The offset relative to the context structure where the option value is stored. It should be 0 for named constants. + + + minimum valid value for the option + + + maximum valid value for the option + + + The logical unit to which the option belongs. Non-constant options and corresponding named constants share the same unit. May be NULL. + + + List of AVOptionRange structs. + + + Array of option ranges. + + + Number of ranges per component. + + + Number of componentes. + + + A reference to a data buffer. + + + The data buffer. It is considered writable if and only if this is the only reference to the buffer, in which case av_buffer_is_writable() returns 1. + + + Size of data in bytes. + + + Structure to hold side data for an AVFrame. + + + This structure describes decoded (raw) audio or video data. 
+ + + pointer to the picture/channel planes. This might be different from the first allocated byte + + + For video, size in bytes of each picture line. For audio, size in bytes of each plane. + + + pointers to the data planes/channels. + + + width and height of the video frame + + + width and height of the video frame + + + number of audio samples (per channel) described by this frame + + + format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames, enum AVSampleFormat for audio) + + + 1 -> keyframe, 0-> not + + + Picture type of the frame. + + + Sample aspect ratio for the video frame, 0/1 if unknown/unspecified. + + + Presentation timestamp in time_base units (time when frame should be shown to user). + + + PTS copied from the AVPacket that was decoded to produce this frame. + + + DTS copied from the AVPacket that triggered returning this frame. (if frame threading isn't used) This is also the Presentation time of this AVFrame calculated from only AVPacket.dts values without pts values. + + + picture number in bitstream order + + + picture number in display order + + + quality (between 1 (good) and FF_LAMBDA_MAX (bad)) + + + for some private data of the user + + + When decoding, this signals how much the picture must be delayed. extra_delay = repeat_pict / (2*fps) + + + The content of the picture is interlaced. + + + If the content is interlaced, is top field displayed first. + + + Tell user application that palette has changed from previous frame. + + + reordered opaque 64 bits (generally an integer or a double precision float PTS but can be anything). The user sets AVCodecContext.reordered_opaque to represent the input at that time, the decoder reorders values as needed and sets AVFrame.reordered_opaque to exactly one of the values provided by the user through AVCodecContext.reordered_opaque + + + Sample rate of the audio data. + + + Channel layout of the audio data. 
+ + + AVBuffer references backing the data for this frame. If all elements of this array are NULL, then this frame is not reference counted. This array must be filled contiguously -- if buf[i] is non-NULL then buf[j] must also be non-NULL for all j < i. + + + For planar audio which requires more than AV_NUM_DATA_POINTERS AVBufferRef pointers, this array will hold all the references which cannot fit into AVFrame.buf. + + + Number of elements in extended_buf. + + + Frame flags, a combination of + + + MPEG vs JPEG YUV range. - encoding: Set by user - decoding: Set by libavcodec + + + YUV colorspace type. - encoding: Set by user - decoding: Set by libavcodec + + + frame timestamp estimated using various heuristics, in stream time base - encoding: unused - decoding: set by libavcodec, read by user. + + + reordered pos from the last AVPacket that has been input into the decoder - encoding: unused - decoding: Read by user. + + + duration of the corresponding packet, expressed in AVStream->time_base units, 0 if unknown. - encoding: unused - decoding: Read by user. + + + metadata. - encoding: Set by user. - decoding: Set by libavcodec. + + + decode error flags of the frame, set to a combination of FF_DECODE_ERROR_xxx flags if the decoder produced a frame, but there were errors during the decoding. - encoding: unused - decoding: set by libavcodec, read by user. + + + number of audio channels, only used for audio. - encoding: unused - decoding: Read by user. + + + size of the corresponding packet containing the compressed frame. It is set to a negative value if unknown. - encoding: unused - decoding: set by libavcodec, read by user. + + + QP table + + + QP store stride + + + For hwaccel-format frames, this should be a reference to the AVHWFramesContext describing the frame. + + + AVBufferRef for free use by the API user. FFmpeg will never check the contents of the buffer ref. FFmpeg calls av_buffer_unref() on it when the frame is unreferenced. 
av_frame_copy_props() calls create a new reference with av_buffer_ref() for the target frame's opaque_ref field. + + + the default value for scalar options + + + A single allowed range of values, or a single allowed value. + + + Value range. For string ranges this represents the min/max length. For dimensions this represents the min/max pixel count or width/height in multi-component case. + + + Value range. For string ranges this represents the min/max length. For dimensions this represents the min/max pixel count or width/height in multi-component case. + + + Value's component range. For string this represents the unicode range for chars, 0-127 limits to ASCII. + + + Value's component range. For string this represents the unicode range for chars, 0-127 limits to ASCII. + + + Range flag. If set to 1 the struct encodes a range, if set to 0 a single value. + + + Which of the 4 planes contains the component. + + + Number of elements between 2 horizontally consecutive pixels. Elements are bits for bitstream formats, bytes otherwise. + + + Number of elements before the component of the first pixel. Elements are bits for bitstream formats, bytes otherwise. + + + Number of least significant bits that must be shifted away to get the value. + + + Number of bits in the component. + + + deprecated, use step instead + + + deprecated, use depth instead + + + deprecated, use offset instead + + + Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes of an image. It also stores the subsampling factors and number of components. + + + The number of components each pixel has, (1-4) + + + Amount to shift the luma width right to find the chroma width. For YV12 this is 1 for example. chroma_width = AV_CEIL_RSHIFT(luma_width, log2_chroma_w) The note above is needed to ensure rounding up. This value only refers to the chroma components. + + + Amount to shift the luma height right to find the chroma height. For YV12 this is 1 for example. 
chroma_height= AV_CEIL_RSHIFT(luma_height, log2_chroma_h) The note above is needed to ensure rounding up. This value only refers to the chroma components. + + + Combination of AV_PIX_FMT_FLAG_... flags. + + + Parameters that describe how pixels are packed. If the format has 1 or 2 components, then luma is 0. If the format has 3 or 4 components: if the RGB flag is set then 0 is red, 1 is green and 2 is blue; otherwise 0 is luma, 1 is chroma-U and 2 is chroma-V. + + + Alternative comma-separated names. + + + timecode frame start (first base frame number) + + + flags such as drop frame, +24 hours support, ... + + + frame rate in rational form + + + frame per second; must be consistent with the rate field + + + This struct aggregates all the (hardware/vendor-specific) "high-level" state, i.e. state that is not tied to a concrete processing configuration. E.g., in an API that supports hardware-accelerated encoding and decoding, this struct will (if possible) wrap the state that is common to both encoding and decoding and from which specific instances of encoders or decoders can be derived. + + + A class for logging. Set by av_hwdevice_ctx_alloc(). + + + Private data used internally by libavutil. Must not be accessed in any way by the caller. + + + This field identifies the underlying API used for hardware access. + + + The format-specific data, allocated and freed by libavutil along with this context. + + + This field may be set by the caller before calling av_hwdevice_ctx_init(). + + + Arbitrary user data, to be used e.g. by the free() callback. + + + This struct describes a set or pool of "hardware" frames (i.e. those with data not located in normal system memory). All the frames in the pool are assumed to be allocated in the same way and interchangeable. + + + A class for logging. + + + Private data used internally by libavutil. Must not be accessed in any way by the caller. + + + A reference to the parent AVHWDeviceContext. 
This reference is owned and managed by the enclosing AVHWFramesContext, but the caller may derive additional references from it. + + + The parent AVHWDeviceContext. This is simply a pointer to device_ref->data provided for convenience. + + + The format-specific data, allocated and freed automatically along with this context. + + + This field may be set by the caller before calling av_hwframe_ctx_init(). + + + Arbitrary user data, to be used e.g. by the free() callback. + + + A pool from which the frames are allocated by av_hwframe_get_buffer(). This field may be set by the caller before calling av_hwframe_ctx_init(). The buffers returned by calling av_buffer_pool_get() on this pool must have the properties described in the documentation in the corresponding hw type's header (hwcontext_*.h). The pool will be freed strictly before this struct's free() callback is invoked. + + + Initial size of the frame pool. If a device type does not support dynamically resizing the pool, then this is also the maximum pool size. + + + The pixel format identifying the underlying HW surface type. + + + The pixel format identifying the actual data layout of the hardware frames. + + + The allocated dimensions of the frames in this pool. + + + The allocated dimensions of the frames in this pool. + + + This struct describes the constraints on hardware frames attached to a given device with a hardware-specific configuration. This is returned by av_hwdevice_get_hwframe_constraints() and must be freed by av_hwframe_constraints_free() after use. + + + A list of possible values for format in the hw_frames_ctx, terminated by AV_PIX_FMT_NONE. This member will always be filled. + + + A list of possible values for sw_format in the hw_frames_ctx, terminated by AV_PIX_FMT_NONE. Can be NULL if this information is not known. + + + The minimum size of frames in this hw_frames_ctx. (Zero if not known.) + + + The maximum size of frames in this hw_frames_ctx. (INT_MAX if not known / no limit.) 
+ + + This struct is allocated as AVHWDeviceContext.hwctx + + + This struct is allocated as AVHWFramesContext.hwctx + + + The surface type (e.g. DXVA2_VideoProcessorRenderTarget or DXVA2_VideoDecoderRenderTarget). Must be set by the caller. + + + The surface pool. When an external pool is not provided by the caller, this will be managed (allocated and filled on init, freed on uninit) by libavutil. + + + Certain drivers require the decoder to be destroyed before the surfaces. To allow internally managed pools to work properly in such cases, this field is provided. + + + pointer to the list of coefficients + + + number of coefficients in the vector + + + This struct describes the properties of a single codec described by an AVCodecID. + + + Name of the codec described by this descriptor. It is non-empty and unique for each codec descriptor. It should contain alphanumeric characters and '_' only. + + + A more descriptive name for this codec. May be NULL. + + + Codec properties, a combination of AV_CODEC_PROP_* flags. + + + MIME type(s) associated with the codec. May be NULL; if not, a NULL-terminated array of MIME types. The first item is always non-NULL and is the preferred MIME type. + + + If non-NULL, an array of profiles recognized for this codec. Terminated with FF_PROFILE_UNKNOWN. + + + AVProfile. + + + short name for the profile + + + Pan Scan area. This specifies the area which should be displayed. Note there may be multiple such areas for one frame. + + + id - encoding: Set by user. - decoding: Set by libavcodec. + + + width and height in 1/16 pel - encoding: Set by user. - decoding: Set by libavcodec. + + + position of the top left corner in 1/16 pel for up to 3 fields/frames - encoding: Set by user. - decoding: Set by libavcodec. + + + This structure describes the bitrate properties of an encoded bitstream. It roughly corresponds to a subset the VBV parameters for MPEG-2 or HRD parameters for H.264/HEVC. 
+ + + Maximum bitrate of the stream, in bits per second. Zero if unknown or unspecified. + + + Minimum bitrate of the stream, in bits per second. Zero if unknown or unspecified. + + + Average bitrate of the stream, in bits per second. Zero if unknown or unspecified. + + + The size of the buffer to which the ratecontrol is applied, in bits. Zero if unknown or unspecified. + + + The delay between the time the packet this structure is associated with is received and the time when it should be decoded, in periods of a 27MHz clock. + + + This structure stores compressed data. It is typically exported by demuxers and then passed as input to decoders, or received as output from encoders and then passed to muxers. + + + A reference to the reference-counted buffer where the packet data is stored. May be NULL, then the packet data is not reference-counted. + + + Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will be presented to the user. Can be AV_NOPTS_VALUE if it is not stored in the file. pts MUST be larger or equal to dts as presentation cannot happen before decompression, unless one wants to view hex dumps. Some formats misuse the terms dts and pts/cts to mean something different. Such timestamps must be converted to true pts/dts before they are stored in AVPacket. + + + Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed. Can be AV_NOPTS_VALUE if it is not stored in the file. + + + A combination of AV_PKT_FLAG values + + + Additional packet data that can be provided by the container. Packet can contain several types of side information. + + + Duration of this packet in AVStream->time_base units, 0 if unknown. Equals next_pts - this_pts in presentation order. + + + byte position in stream, -1 if unknown + + + main external API structure. New fields can be added to the end with minor version bumps. 
Removal, reordering and changes to existing fields require a major version bump. You can use AVOptions (av_opt* / av_set/get*()) to access these fields from user applications. The name string for AVOptions options matches the associated command line parameter name and can be found in libavcodec/options_table.h The AVOption/command line parameter names differ in some cases from the C structure field names for historic reasons or brevity. sizeof(AVCodecContext) must not be used outside libav*. + + + information on struct for av_log - set by avcodec_alloc_context3 + + + fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A'). This is used to work around some encoder bugs. A demuxer should set this to what is stored in the field used to identify the codec. If there are multiple such fields in a container then the demuxer should choose the one which maximizes the information about the used codec. If the codec tag field in a container is larger than 32 bits then the demuxer should remap the longer ID to 32 bits with a table or other structure. Alternatively a new extra_codec_tag + size could be added but for this a clear advantage must be demonstrated first. - encoding: Set by user, if not then the default based on codec_id will be used. - decoding: Set by user, will be converted to uppercase by libavcodec during init. + + + Private context used for internal data. + + + Private data of the user, can be used to carry app specific stuff. - encoding: Set by user. - decoding: Set by user. + + + the average bitrate - encoding: Set by user; unused for constant quantizer encoding. - decoding: Set by user, may be overwritten by libavcodec if this info is available in the stream + + + number of bits the bitstream is allowed to diverge from the reference. the reference can be CBR (for CBR pass1) or VBR (for pass2) - encoding: Set by user; unused for constant quantizer encoding. - decoding: unused + + + Global quality for codecs which cannot change it per frame. 
This should be proportional to MPEG-1/2/4 qscale. - encoding: Set by user. - decoding: unused + + + - encoding: Set by user. - decoding: unused + + + AV_CODEC_FLAG_*. - encoding: Set by user. - decoding: Set by user. + + + AV_CODEC_FLAG2_* - encoding: Set by user. - decoding: Set by user. + + + some codecs need / can use extradata like Huffman tables. MJPEG: Huffman tables rv10: additional flags MPEG-4: global headers (they can be in the bitstream or here) The allocated memory should be AV_INPUT_BUFFER_PADDING_SIZE bytes larger than extradata_size to avoid problems if it is read with the bitstream reader. The bytewise contents of extradata must not depend on the architecture or CPU endianness. - encoding: Set/allocated/freed by libavcodec. - decoding: Set/allocated/freed by user. + + + This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented. For fixed-fps content, timebase should be 1/framerate and timestamp increments should be identically 1. This often, but not always is the inverse of the frame rate or field rate for video. 1/time_base is not the average frame rate if the frame rate is not constant. + + + For some codecs, the time base is closer to the field rate than the frame rate. Most notably, H.264 and MPEG-2 specify time_base as half of frame duration if no telecine is used ... + + + Codec delay. + + + picture width / height. + + + picture width / height. + + + Bitstream width / height, may be different from width/height e.g. when the decoded frame is cropped before being output or lowres is enabled. + + + Bitstream width / height, may be different from width/height e.g. when the decoded frame is cropped before being output or lowres is enabled. + + + the number of pictures in a group of pictures, or 0 for intra_only - encoding: Set by user. - decoding: unused + + + Pixel format, see AV_PIX_FMT_xxx. May be set by the demuxer if known from headers. May be overridden by the decoder if it knows better. 
+ + + This option does nothing + + + If non NULL, 'draw_horiz_band' is called by the libavcodec decoder to draw a horizontal band. It improves cache usage. Not all codecs can do that. You must check the codec capabilities beforehand. When multithreading is used, it may be called from multiple threads at the same time; threads might draw different parts of the same AVFrame, or multiple AVFrames, and there is no guarantee that slices will be drawn in order. The function is also used by hardware acceleration APIs. It is called at least once during frame decoding to pass the data needed for hardware render. In that mode instead of pixel data, AVFrame points to a structure specific to the acceleration API. The application reads the structure and can change some fields to indicate progress or mark state. - encoding: unused - decoding: Set by user. + + + callback to negotiate the pixelFormat + + + maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 relative to the input. - encoding: Set by user. - decoding: unused + + + qscale factor between IP and B-frames If > 0 then the last P-frame quantizer will be used (q= lastp_q*factor+offset). If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset). - encoding: Set by user. - decoding: unused + + + qscale offset between IP and B-frames - encoding: Set by user. - decoding: unused + + + Size of the frame reordering buffer in the decoder. For MPEG-2 it is 1 IPB or 0 low delay IP. - encoding: Set by libavcodec. - decoding: Set by libavcodec. + + + qscale factor between P- and I-frames If > 0 then the last P-frame quantizer will be used (q = lastp_q * factor + offset). If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset). - encoding: Set by user. - decoding: unused + + + qscale offset between P and I-frames - encoding: Set by user. - decoding: unused + + + luminance masking (0-> disabled) - encoding: Set by user. 
- decoding: unused + + + temporary complexity masking (0-> disabled) - encoding: Set by user. - decoding: unused + + + spatial complexity masking (0-> disabled) - encoding: Set by user. - decoding: unused + + + p block masking (0-> disabled) - encoding: Set by user. - decoding: unused + + + darkness masking (0-> disabled) - encoding: Set by user. - decoding: unused + + + slice count - encoding: Set by libavcodec. - decoding: Set by user (or 0). + + + slice offsets in the frame in bytes - encoding: Set/allocated by libavcodec. - decoding: Set/allocated by user (or NULL). + + + sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel. Numerator and denominator must be relatively prime and smaller than 256 for some video standards. - encoding: Set by user. - decoding: Set by libavcodec. + + + motion estimation comparison function - encoding: Set by user. - decoding: unused + + + subpixel motion estimation comparison function - encoding: Set by user. - decoding: unused + + + macroblock comparison function (not supported yet) - encoding: Set by user. - decoding: unused + + + interlaced DCT comparison function - encoding: Set by user. - decoding: unused + + + ME diamond size & shape - encoding: Set by user. - decoding: unused + + + amount of previous MV predictors (2a+1 x 2a+1 square) - encoding: Set by user. - decoding: unused + + + motion estimation prepass comparison function - encoding: Set by user. - decoding: unused + + + ME prepass diamond size & shape - encoding: Set by user. - decoding: unused + + + subpel ME quality - encoding: Set by user. - decoding: unused + + + DTG active format information (additional aspect ratio information only used in DVB MPEG-2 transport streams) 0 if not set. + + + maximum motion estimation search range in subpel units If 0 then no limit. + + + slice flags - encoding: unused - decoding: Set by user. 
+ + + XVideo Motion Acceleration - encoding: forbidden - decoding: set by decoder + + + macroblock decision mode - encoding: Set by user. - decoding: unused + + + custom intra quantization matrix - encoding: Set by user, can be NULL. - decoding: Set by libavcodec. + + + custom inter quantization matrix - encoding: Set by user, can be NULL. - decoding: Set by libavcodec. + + + precision of the intra DC coefficient - 8 - encoding: Set by user. - decoding: Set by libavcodec + + + Number of macroblock rows at the top which are skipped. - encoding: unused - decoding: Set by user. + + + Number of macroblock rows at the bottom which are skipped. - encoding: unused - decoding: Set by user. + + + minimum MB Lagrange multiplier - encoding: Set by user. - decoding: unused + + + maximum MB Lagrange multiplier - encoding: Set by user. - decoding: unused + + + - encoding: Set by user. - decoding: unused + + + minimum GOP size - encoding: Set by user. - decoding: unused + + + number of reference frames - encoding: Set by user. - decoding: Set by lavc. + + + Multiplied by qscale for each frame and added to scene_change_score. - encoding: Set by user. - decoding: unused + + + Note: Value depends upon the compare function used for fullpel ME. - encoding: Set by user. - decoding: unused + + + Chromaticity coordinates of the source primaries. - encoding: Set by user - decoding: Set by libavcodec + + + Color Transfer Characteristic. - encoding: Set by user - decoding: Set by libavcodec + + + YUV colorspace type. - encoding: Set by user - decoding: Set by libavcodec + + + MPEG vs JPEG YUV range. - encoding: Set by user - decoding: Set by libavcodec + + + This defines the location of chroma samples. - encoding: Set by user - decoding: Set by libavcodec + + + Number of slices. Indicates number of picture subdivisions. Used for parallelized decoding. - encoding: Set by user - decoding: unused + + + Field order - encoding: set by libavcodec - decoding: Set by user. 
+ + + samples per second + + + number of audio channels + + + sample format + + + Number of samples per channel in an audio frame. + + + Frame counter, set by libavcodec. + + + number of bytes per packet if constant and known or 0 Used by some WAV based audio codecs. + + + Audio cutoff bandwidth (0 means "automatic") - encoding: Set by user. - decoding: unused + + + Audio channel layout. - encoding: set by user. - decoding: set by user, may be overwritten by libavcodec. + + + Request decoder to use this channel layout if it can (0 for default) - encoding: unused - decoding: Set by user. + + + Type of service that the audio stream conveys. - encoding: Set by user. - decoding: Set by libavcodec. + + + desired sample format - encoding: Not used. - decoding: Set by user. Decoder will decode to this format if it can. + + + This callback is called at the beginning of each frame to get data buffer(s) for it. There may be one contiguous buffer for all the data or there may be a buffer per each data plane or anything in between. What this means is, you may set however many entries in buf[] you feel necessary. Each buffer must be reference-counted using the AVBuffer API (see description of buf[] below). + + + If non-zero, the decoded audio and video frames returned from avcodec_decode_video2() and avcodec_decode_audio4() are reference-counted and are valid indefinitely. The caller must free them with av_frame_unref() when they are not needed anymore. Otherwise, the decoded frames must not be freed by the caller and are only valid until the next decode call. + + + amount of qscale change between easy & hard scenes (0.0-1.0) + + + amount of qscale smoothing over time (0.0-1.0) + + + minimum quantizer - encoding: Set by user. - decoding: unused + + + maximum quantizer - encoding: Set by user. - decoding: unused + + + maximum quantizer difference between frames - encoding: Set by user. - decoding: unused + + + decoder bitstream buffer size - encoding: Set by user. 
- decoding: unused + + + ratecontrol override, see RcOverride - encoding: Allocated/set/freed by user. - decoding: unused + + + maximum bitrate - encoding: Set by user. - decoding: Set by user, may be overwritten by libavcodec. + + + minimum bitrate - encoding: Set by user. - decoding: unused + + + Ratecontrol attempt to use, at maximum, <value> of what can be used without an underflow. - encoding: Set by user. - decoding: unused. + + + Ratecontrol attempt to use, at least, <value> times the amount needed to prevent a vbv overflow. - encoding: Set by user. - decoding: unused. + + + Number of bits which should be loaded into the rc buffer before decoding starts. - encoding: Set by user. - decoding: unused + + + trellis RD quantization - encoding: Set by user. - decoding: unused + + + pass1 encoding statistics output buffer - encoding: Set by libavcodec. - decoding: unused + + + pass2 encoding statistics input buffer Concatenated stuff from stats_out of pass1 should be placed here. - encoding: Allocated/set/freed by user. - decoding: unused + + + Work around bugs in encoders which sometimes cannot be detected automatically. - encoding: Set by user - decoding: Set by user + + + strictly follow the standard (MPEG-4, ...). - encoding: Set by user. - decoding: Set by user. Setting this to STRICT or higher means the encoder and decoder will generally do stupid things, whereas setting it to unofficial or lower will mean the encoder might produce output that is not supported by all spec-compliant decoders. Decoders don't differentiate between normal, unofficial and experimental (that is, they always try to decode things when they can) unless they are explicitly asked to behave stupidly (=strictly conform to the specs) + + + error concealment flags - encoding: unused - decoding: Set by user. + + + debug - encoding: Set by user. - decoding: Set by user. + + + debug - encoding: Set by user. - decoding: Set by user. 
+ + + Error recognition; may misdetect some more or less valid parts as errors. - encoding: unused - decoding: Set by user. + + + opaque 64-bit number (generally a PTS) that will be reordered and output in AVFrame.reordered_opaque - encoding: unused - decoding: Set by user. + + + Hardware accelerator in use - encoding: unused. - decoding: Set by libavcodec + + + Hardware accelerator context. For some hardware accelerators, a global context needs to be provided by the user. In that case, this holds display-dependent data FFmpeg cannot instantiate itself. Please refer to the FFmpeg HW accelerator documentation to know how to fill this is. e.g. for VA API, this is a struct vaapi_context. - encoding: unused - decoding: Set by user + + + error - encoding: Set by libavcodec if flags & AV_CODEC_FLAG_PSNR. - decoding: unused + + + DCT algorithm, see FF_DCT_* below - encoding: Set by user. - decoding: unused + + + IDCT algorithm, see FF_IDCT_* below. - encoding: Set by user. - decoding: Set by user. + + + bits per sample/pixel from the demuxer (needed for huffyuv). - encoding: Set by libavcodec. - decoding: Set by user. + + + Bits per sample/pixel of internal libavcodec pixel/sample format. - encoding: set by user. - decoding: set by libavcodec. + + + low resolution decoding, 1-> 1/2 size, 2->1/4 size - encoding: unused - decoding: Set by user. + + + the picture in the bitstream - encoding: Set by libavcodec. - decoding: unused + + + thread count is used to decide how many independent tasks should be passed to execute() - encoding: Set by user. - decoding: Set by user. + + + Which multithreading methods to use. Use of FF_THREAD_FRAME will increase decoding delay by one frame per thread, so clients which cannot provide future frames should not use it. + + + Which multithreading methods are in use by the codec. - encoding: Set by libavcodec. - decoding: Set by libavcodec. 
+ + + Set by the client if its custom get_buffer() callback can be called synchronously from another thread, which allows faster multithreaded decoding. draw_horiz_band() will be called from other threads regardless of this setting. Ignored if the default get_buffer() is used. - encoding: Set by user. - decoding: Set by user. + + + The codec may call this to execute several independent things. It will return only after finishing all tasks. The user may replace this with some multithreaded implementation, the default implementation will execute the parts serially. + + + The codec may call this to execute several independent things. It will return only after finishing all tasks. The user may replace this with some multithreaded implementation, the default implementation will execute the parts serially. Also see avcodec_thread_init and e.g. the --enable-pthread configure option. + + + noise vs. sse weight for the nsse comparison function - encoding: Set by user. - decoding: unused + + + profile - encoding: Set by user. - decoding: Set by libavcodec. + + + level - encoding: Set by user. - decoding: Set by libavcodec. + + + Skip loop filtering for selected frames. - encoding: unused - decoding: Set by user. + + + Skip IDCT/dequantization for selected frames. - encoding: unused - decoding: Set by user. + + + Skip decoding for selected frames. - encoding: unused - decoding: Set by user. + + + Header containing style information for text subtitles. For SUBTITLE_ASS subtitle type, it should contain the whole ASS [Script Info] and [V4+ Styles] section, plus the [Events] line and the Format line following. It shouldn't include any Dialogue line. - encoding: Set/allocated/freed by user (before avcodec_open2()) - decoding: Set/allocated/freed by libavcodec (by avcodec_open2()) + + + VBV delay coded in the last frame (in periods of a 27 MHz clock). Used for compliant TS muxing. - encoding: Set by libavcodec. - decoding: unused. + + + Encoding only and set by default. 
Allow encoders to output packets that do not contain any encoded data, only side data. + + + Audio only. The number of "priming" samples (padding) inserted by the encoder at the beginning of the audio. I.e. this number of leading decoded samples must be discarded by the caller to get the original audio without leading padding. + + + - decoding: For codecs that store a framerate value in the compressed bitstream, the decoder may export it here. { 0, 1} when unknown. - encoding: May be used to signal the framerate of CFR content to an encoder. + + + Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx. - encoding: unused. - decoding: Set by libavcodec before calling get_format() + + + Timebase in which pkt_dts/pts and AVPacket.dts/pts are. - encoding unused. - decoding set by user. + + + AVCodecDescriptor - encoding: unused. - decoding: set by libavcodec. + + + Current statistics for PTS correction. - decoding: maintained and used by libavcodec, not intended to be used by user apps - encoding: unused + + + Number of incorrect PTS values so far + + + Number of incorrect DTS values so far + + + PTS of the last frame + + + Character encoding of the input subtitles file. - decoding: set by user - encoding: unused + + + Subtitles character encoding mode. Formats or codecs might be adjusting this setting (if they are doing the conversion themselves for instance). - decoding: set by libavcodec - encoding: unused + + + Skip processing alpha if supported by codec. Note that if the format uses pre-multiplied alpha (common with VP6, and recommended due to better video quality/compression) the image will look as if alpha-blended onto a black background. However for formats that do not use pre-multiplied alpha there might be serious artefacts (though e.g. libswscale currently assumes pre-multiplied alpha anyway). 
+ + + Number of samples to skip after a discontinuity - decoding: unused - encoding: set by libavcodec + + + custom intra quantization matrix - encoding: Set by user, can be NULL. - decoding: unused. + + + dump format separator. can be ", " or " " or anything else - encoding: Set by user. - decoding: Set by user. + + + ',' separated list of allowed decoders. If NULL then all are allowed - encoding: unused - decoding: set by user + + + Additional data associated with the entire coded stream. + + + A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames. The reference is set by the caller and afterwards owned (and freed) by libavcodec - it should never be read by the caller after being set. + + + Control the form of AVSubtitle.rects[N]->ass - decoding: set by user - encoding: unused + + + Audio only. The amount of padding (in samples) appended by the encoder to the end of the audio. I.e. this number of decoded samples must be discarded by the caller from the end of the stream to get the original audio without any trailing padding. + + + The number of pixels per image to maximally accept. + + + A reference to the AVHWDeviceContext describing the device which will be used by a hardware encoder/decoder. The reference is set by the caller and afterwards owned (and freed) by libavcodec. + + + Bit set of AV_HWACCEL_FLAG_* flags, which affect hardware accelerated decoding (if active). - encoding: unused - decoding: Set by user (either before avcodec_open2(), or in the AVCodecContext.get_format callback) + + + AVCodec. + + + Name of the codec implementation. The name is globally unique among encoders and among decoders (but an encoder and a decoder can share the same name). This is the primary way to find a codec from the user perspective. + + + Descriptive name for the codec, meant to be more human readable than name. You should use the NULL_IF_CONFIG_SMALL() macro to define it. + + + Codec capabilities. 
see AV_CODEC_CAP_* + + + array of supported framerates, or NULL if any, array is terminated by {0,0} + + + array of supported pixel formats, or NULL if unknown, array is terminated by -1 + + + array of supported audio samplerates, or NULL if unknown, array is terminated by 0 + + + array of supported sample formats, or NULL if unknown, array is terminated by -1 + + + array of support channel layouts, or NULL if unknown. array is terminated by 0 + + + maximum value for lowres supported by the decoder + + + AVClass for the private context + + + array of recognized profiles, or NULL if unknown, array is terminated by {FF_PROFILE_UNKNOWN} + + + *************************************************************** No fields below this line are part of the public API. They may not be used outside of libavcodec and can be changed and removed at will. New public fields should be added right above. **************************************************************** + + + @{ + + + Copy necessary context variables from a previous thread context to the current one. If not defined, the next thread will start automatically; otherwise, the codec must call ff_thread_finish_setup(). + + + Private codec-specific defaults. + + + Initialize codec static data, called from avcodec_register(). + + + Encode data to an AVPacket. + + + Decode/encode API with decoupled packet/frame dataflow. The API is the same as the avcodec_ prefixed APIs (avcodec_send_frame() etc.), except that: - never called if the codec is closed or the wrong type, - AVPacket parameter change side data is applied right before calling AVCodec->send_packet, - if AV_CODEC_CAP_DELAY is not set, drain packets or frames are never sent, - only one drain packet is ever passed down (until the next flush()), - a drain AVPacket is always NULL (no need to check for avpkt->size). + + + Flush buffers. Will be called when seeking + + + Internal codec capabilities. 
See FF_CODEC_CAP_* in internal.h + + + Same as packet pts, in AV_TIME_BASE + + + top left corner of pict, undefined when pict is not set + + + top left corner of pict, undefined when pict is not set + + + width of pict, undefined when pict is not set + + + height of pict, undefined when pict is not set + + + number of colors in pict, undefined when pict is not set + + + data+linesize for the bitmap of this subtitle. Can be set for text/ass as well once they are rendered. + + + 0 terminated plain UTF-8 text + + + 0 terminated ASS/SSA compatible event line. The presentation of this is unaffected by the other values in this struct. + + + Picture data structure. + + + pointers to the image data planes + + + number of bytes per line + + + @{ + + + Name of the hardware accelerated codec. The name is globally unique among encoders and among decoders (but an encoder and a decoder can share the same name). + + + Type of codec implemented by the hardware accelerator. + + + Codec implemented by the hardware accelerator. + + + Supported pixel format. + + + Hardware accelerated codec capabilities. see HWACCEL_CODEC_CAP_* + + + *************************************************************** No fields below this line are part of the public API. They may not be used outside of libavcodec and can be changed and removed at will. New public fields should be added right above. **************************************************************** + + + Allocate a custom buffer + + + Called at the beginning of each frame or field picture. + + + Callback for each slice. + + + Called at the end of each frame or field picture. + + + Size of per-frame hardware accelerator private data. + + + Called for every Macroblock in a slice. + + + Initialize the hwaccel private data. + + + Uninitialize the hwaccel private data. + + + Size of the private data to allocate in AVCodecInternal.hwaccel_priv_data. + + + Internal hwaccel capabilities. 
+ + + This struct describes the properties of an encoded stream. + + + General type of the encoded data. + + + Specific type of the encoded data (the codec used). + + + Additional information about the codec (corresponds to the AVI FOURCC). + + + Extra binary data needed for initializing the decoder, codec-dependent. + + + Size of the extradata content in bytes. + + + - video: the pixel format, the value corresponds to enum AVPixelFormat. - audio: the sample format, the value corresponds to enum AVSampleFormat. + + + The average bitrate of the encoded data (in bits per second). + + + The number of bits per sample in the codedwords. + + + This is the number of valid bits in each output sample. If the sample format has more bits, the least significant bits are additional padding bits, which are always 0. Use right shifts to reduce the sample to its actual size. For example, audio formats with 24 bit samples will have bits_per_raw_sample set to 24, and format set to AV_SAMPLE_FMT_S32. To get the original sample use "(int32_t)sample >> 8"." + + + Codec-specific bitstream restrictions that the stream conforms to. + + + Video only. The dimensions of the video frame in pixels. + + + Video only. The aspect ratio (width / height) which a single pixel should have when displayed. + + + Video only. The order of the fields in interlaced video. + + + Video only. Additional colorspace characteristics. + + + Video only. Number of delayed frames. + + + Audio only. The channel layout bitmask. May be 0 if the channel layout is unknown or unspecified, otherwise the number of bits set must be equal to the channels field. + + + Audio only. The number of audio channels. + + + Audio only. The number of audio samples per second. + + + Audio only. The number of bytes per coded audio frame, required by some formats. + + + Audio only. Audio frame size, if known. Required by some formats to be static. + + + Audio only. 
The amount of padding (in samples) inserted by the encoder at the beginning of the audio. I.e. this number of leading decoded samples must be discarded by the caller to get the original audio without leading padding. + + + Audio only. The amount of padding (in samples) appended by the encoder to the end of the audio. I.e. this number of decoded samples must be discarded by the caller from the end of the stream to get the original audio without any trailing padding. + + + Audio only. Number of samples to skip after a discontinuity. + + + This field is used for proper frame duration computation in lavf. It signals, how much longer the frame duration of the current frame is compared to normal frame duration. + + + byte offset from starting packet start + + + Set by parser to 1 for key frames and 0 for non-key frames. It is initialized to -1, so if the parser doesn't set this flag, old-style fallback using AV_PICTURE_TYPE_I picture type as key frames will be used. + + + Synchronization point for start of timestamp generation. + + + Offset of the current timestamp against last timestamp sync point in units of AVCodecContext.time_base. + + + Presentation delay of current frame in units of AVCodecContext.time_base. + + + Position of the packet in file. + + + Byte position of currently parsed frame in stream. + + + Previous frame byte position. + + + Duration of the current frame. For audio, this is in units of 1 / AVCodecContext.sample_rate. For all other types, this is in units of AVCodecContext.time_base. + + + Indicate whether a picture is coded as a frame, top field or bottom field. + + + Picture number incremented in presentation or output order. This field may be reinitialized at the first picture of a new sequence. + + + Dimensions of the decoded video intended for presentation. + + + Dimensions of the coded video. + + + The format of the coded data, corresponds to enum AVPixelFormat for video and for enum AVSampleFormat for audio. + + + The bitstream filter state. 
+ + + A class for logging and AVOptions + + + The bitstream filter this context is an instance of. + + + Opaque libavcodec internal data. Must not be touched by the caller in any way. + + + Opaque filter-specific private data. If filter->priv_class is non-NULL, this is an AVOptions-enabled struct. + + + Parameters of the input stream. This field is allocated in av_bsf_alloc(), it needs to be filled by the caller before av_bsf_init(). + + + Parameters of the output stream. This field is allocated in av_bsf_alloc(), it is set by the filter in av_bsf_init(). + + + The timebase used for the timestamps of the input packets. Set by the caller before av_bsf_init(). + + + The timebase used for the timestamps of the output packets. Set by the filter in av_bsf_init(). + + + A list of codec ids supported by the filter, terminated by AV_CODEC_ID_NONE. May be NULL, in that case the bitstream filter works with any codec id. + + + A class for the private data, used to declare bitstream filter private AVOptions. This field is NULL for bitstream filters that do not declare any options. + + + *************************************************************** No fields below this line are part of the public API. They may not be used outside of libavcodec and can be changed and removed at will. New public fields should be added right above. **************************************************************** + + + Internal default arguments, used if NULL is passed to av_bitstream_filter_filter(). Not for access by library users. + + + This structure is used to provides the necessary configurations and data to the Direct3D11 FFmpeg HWAccel implementation. 
+ + + D3D11 decoder object + + + D3D11 VideoContext + + + D3D11 configuration used to create the decoder + + + The number of surface in the surface array + + + The array of Direct3D surfaces used to create the decoder + + + A bit field configuring the workarounds needed for using the decoder + + + Private to the FFmpeg AVHWAccel implementation + + + Mutex to access video_context + + + This structure contains the data a format has to probe a file. + + + Buffer must have AVPROBE_PADDING_SIZE of extra allocated bytes filled with zero. + + + Size of buf except extra allocated bytes + + + mime_type, when known. + + + Timestamp in AVStream.time_base units, preferably the time from which on correctly decoded frames are available when seeking to this entry. That means preferable PTS on keyframe based formats. But demuxers can choose to store a different timestamp, if it is more convenient for the implementation or nothing better is known + + + Flag is used to indicate which frame should be discarded after decoding. + + + Minimum distance between this and the previous keyframe, used to avoid unneeded searching. + + + Stream structure. New fields can be added to the end with minor version bumps. Removal, reordering and changes to existing fields require a major version bump. sizeof(AVStream) must not be used outside libav*. + + + stream index in AVFormatContext + + + Format-specific stream ID. decoding: set by libavformat encoding: set by the user, replaced by libavformat if left unset + + + This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented. + + + Decoding: pts of the first frame of the stream in presentation order, in stream time base. Only set this if you are absolutely 100% sure that the value you set it to really is the pts of the first frame. This may be undefined (AV_NOPTS_VALUE). + + + Decoding: duration of the stream, in stream time base. 
If a source file does not specify a duration, but does specify a bitrate, this value will be estimated from bitrate and file size. + + + number of frames in this stream if known or 0 + + + AV_DISPOSITION_* bit field + + + Selects which packets can be discarded at will and do not need to be demuxed. + + + sample aspect ratio (0 if unknown) - encoding: Set by user. - decoding: Set by libavformat. + + + Average framerate + + + For streams with AV_DISPOSITION_ATTACHED_PIC disposition, this packet will contain the attached picture. + + + An array of side data that applies to the whole stream (i.e. the container does not allow it to change between packets). + + + The number of elements in the AVStream.side_data array. + + + Flags for the user to detect events happening on the stream. Flags must be cleared by the user once the event has been handled. A combination of AVSTREAM_EVENT_FLAG_*. + + + number of bits in pts (used for wrapping control) + + + Timestamp corresponding to the last dts sync point. + + + Number of packets to buffer for codec probing + + + Number of frames that have been demuxed during avformat_find_stream_info() + + + last packet in packet_buffer for this stream when muxing. + + + Only used if the format does not support seeking natively. + + + Real base framerate of the stream. This is the lowest framerate with which all timestamps can be represented accurately (it is the least common multiple of all framerates in the stream). Note, this value is just a guess! For example, if the time base is 1/90000 and all frames have either approximately 3600 or 1800 timer ticks, then r_frame_rate will be 50/1. + + + Stream Identifier This is the MPEG-TS stream identifier +1 0 means unknown + + + stream probing state -1 -> probing finished 0 -> no probing requested rest -> perform probing with request_probe being the minimum score to accept. NOT PART OF PUBLIC API + + + Indicates that everything up to the next keyframe should be discarded. 
+ + + Number of samples to skip at the start of the frame decoded from the next packet. + + + If not 0, the number of samples that should be skipped from the start of the stream (the samples are removed from packets with pts==0, which also assumes negative timestamps do not happen). Intended for use with formats such as mp3 with ad-hoc gapless audio support. + + + If not 0, the first audio sample that should be discarded from the stream. This is broken by design (needs global sample count), but can't be avoided for broken by design formats such as mp3 with ad-hoc gapless audio support. + + + The sample after last sample that is intended to be discarded after first_discard_sample. Works on frame boundaries only. Used to prevent early EOF if the gapless info is broken (considered concatenated mp3s). + + + Number of internally decoded frames, used internally in libavformat, do not access its lifetime differs from info which is why it is not in that structure. + + + Timestamp offset added to timestamps before muxing NOT PART OF PUBLIC API + + + Internal data to check for wrapping of the time stamp + + + Options for behavior, when a wrap is detected. + + + Internal data to prevent doing update_initial_durations() twice + + + Internal data to generate dts from pts + + + Internal data to analyze DTS and detect faulty mpeg streams + + + Internal data to inject global side data + + + String containing paris of key and values describing recommended encoder configuration. Paris are separated by ','. Keys are separated from values by '='. + + + display aspect ratio (0 if unknown) - encoding: unused - decoding: Set by libavformat to calculate sample_aspect_ratio internally + + + An opaque field for libavformat internal usage. Must not be accessed in any way by callers. + + + The exact value of the fractional number is: 'val + num / den'. num is assumed to be 0 <= num < den. + + + 0 -> decoder has not been searched for yet. 
>0 -> decoder found <0 -> decoder with codec_id == -found_decoder has not been found + + + Those are used for average framerate estimation. + + + New fields can be added to the end with minor version bumps. Removal, reordering and changes to existing fields require a major version bump. sizeof(AVProgram) must not be used outside libav*. + + + selects which program to discard and which to feed to the caller + + + *************************************************************** All fields below this line are not part of the public API. They may not be used outside of libavformat and can be changed and removed at will. New public fields should be added right above. **************************************************************** + + + reference dts for wrap detection + + + behavior on wrap detection + + + unique ID to identify the chapter + + + time base in which the start/end timestamps are specified + + + chapter start/end time in time_base units + + + chapter start/end time in time_base units + + + @{ + + + Descriptive name for the format, meant to be more human-readable than name. You should use the NULL_IF_CONFIG_SMALL() macro to define it. + + + comma-separated filename extensions + + + default audio codec + + + default video codec + + + default subtitle codec + + + can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_GLOBALHEADER, AVFMT_NOTIMESTAMPS, AVFMT_VARIABLE_FPS, AVFMT_NODIMENSIONS, AVFMT_NOSTREAMS, AVFMT_ALLOW_FLUSH, AVFMT_TS_NONSTRICT, AVFMT_TS_NEGATIVE + + + List of supported codec_id-codec_tag pairs, ordered by "better choice first". The arrays are all terminated by AV_CODEC_ID_NONE. + + + AVClass for the private context + + + *************************************************************** No fields below this line are part of the public API. They may not be used outside of libavformat and can be changed and removed at will. New public fields should be added right above. 
**************************************************************** + + + size of private data so that it can be allocated in the wrapper + + + Write a packet. If AVFMT_ALLOW_FLUSH is set in flags, pkt can be NULL in order to flush data buffered in the muxer. When flushing, return 0 if there still is more data to flush, or 1 if everything was flushed and there is no more buffered data. + + + Currently only used to set pixel format if not YUV420P. + + + Test if the given codec can be stored in this container. + + + Allows sending messages from application to device. + + + Write an uncoded AVFrame. + + + Returns device list with it properties. + + + Initialize device capabilities submodule. + + + Free device capabilities submodule. + + + default data codec + + + Initialize format. May allocate data here, and set any AVFormatContext or AVStream parameters that need to be set before packets are sent. This method must not write output. + + + Deinitialize format. If present, this is called whenever the muxer is being destroyed, regardless of whether or not the header has been written. + + + Set up any necessary bitstream filtering and extract any extra data needed for the global header. Return 0 if more packets from this stream must be checked; 1 if not. + + + Format I/O context. New fields can be added to the end with minor version bumps. Removal, reordering and changes to existing fields require a major version bump. sizeof(AVFormatContext) must not be used outside libav*, use avformat_alloc_context() to create an AVFormatContext. + + + A class for logging and Exports (de)muxer private options if they exist. + + + The input container format. + + + The output container format. + + + Format private data. This is an AVOptions-enabled struct if and only if iformat/oformat.priv_class is not NULL. + + + I/O context. + + + Flags signalling stream properties. A combination of AVFMTCTX_*. Set by libavformat. + + + Number of elements in AVFormatContext.streams. 
+ + + A list of all streams in the file. New streams are created with avformat_new_stream(). + + + input or output filename + + + Position of the first frame of the component, in AV_TIME_BASE fractional seconds. NEVER set this value directly: It is deduced from the AVStream values. + + + Duration of the stream, in AV_TIME_BASE fractional seconds. Only set this value if you know none of the individual stream durations and also do not set any of them. This is deduced from the AVStream values if not set. + + + Total stream bitrate in bit/s, 0 if not available. Never set it directly if the file_size and the duration are known as FFmpeg can compute it automatically. + + + Flags modifying the (de)muxer behaviour. A combination of AVFMT_FLAG_*. Set by the user before avformat_open_input() / avformat_write_header(). + + + Maximum size of the data read from input for determining the input container format. Demuxing only, set by the caller before avformat_open_input(). + + + Maximum duration (in AV_TIME_BASE units) of the data read from input in avformat_find_stream_info(). Demuxing only, set by the caller before avformat_find_stream_info(). Can be set to 0 to let avformat choose using a heuristic. + + + Forced video codec_id. Demuxing: Set by user. + + + Forced audio codec_id. Demuxing: Set by user. + + + Forced subtitle codec_id. Demuxing: Set by user. + + + Maximum amount of memory in bytes to use for the index of each stream. If the index exceeds this size, entries will be discarded as needed to maintain a smaller size. This can lead to slower or less accurate seeking (depends on demuxer). Demuxers for which a full in-memory index is mandatory will ignore this. - muxing: unused - demuxing: set by user + + + Maximum amount of memory in bytes to use for buffering frames obtained from realtime capture devices. + + + Number of chapters in AVChapter array. 
When muxing, chapters are normally written in the file header, so nb_chapters should normally be initialized before write_header is called. Some muxers (e.g. mov and mkv) can also write chapters in the trailer. To write chapters in the trailer, nb_chapters must be zero when write_header is called and non-zero when write_trailer is called. - muxing: set by user - demuxing: set by libavformat + + + Metadata that applies to the whole file. + + + Start time of the stream in real world time, in microseconds since the Unix epoch (00:00 1st January 1970). That is, pts=0 in the stream was captured at this real world time. - muxing: Set by the caller before avformat_write_header(). If set to either 0 or AV_NOPTS_VALUE, then the current wall-time will be used. - demuxing: Set by libavformat. AV_NOPTS_VALUE if unknown. Note that the value may become known after some number of frames have been received. + + + The number of frames used for determining the framerate in avformat_find_stream_info(). Demuxing only, set by the caller before avformat_find_stream_info(). + + + Error recognition; higher values will detect more errors but may misdetect some more or less valid parts as errors. Demuxing only, set by the caller before avformat_open_input(). + + + Custom interrupt callbacks for the I/O layer. + + + Flags to enable debugging. + + + Maximum buffering duration for interleaving. + + + Allow non-standard and experimental extension + + + Flags for the user to detect events happening on the file. Flags must be cleared by the user once the event has been handled. A combination of AVFMT_EVENT_FLAG_*. + + + Maximum number of packets to read while waiting for the first timestamp. Decoding only. + + + Avoid negative timestamps during muxing. Any value of the AVFMT_AVOID_NEG_TS_* constants. Note, this only works when using av_interleaved_write_frame. (interleave_packet_per_dts is in use) - muxing: Set by user - demuxing: unused + + + Transport stream id. 
This will be moved into demuxer private options. Thus no API/ABI compatibility + + + Audio preload in microseconds. Note, not all formats support this and unpredictable things may happen if it is used when not supported. - encoding: Set by user - decoding: unused + + + Max chunk time in microseconds. Note, not all formats support this and unpredictable things may happen if it is used when not supported. - encoding: Set by user - decoding: unused + + + Max chunk size in bytes Note, not all formats support this and unpredictable things may happen if it is used when not supported. - encoding: Set by user - decoding: unused + + + forces the use of wallclock timestamps as pts/dts of packets This has undefined results in the presence of B frames. - encoding: unused - decoding: Set by user + + + avio flags, used to force AVIO_FLAG_DIRECT. - encoding: unused - decoding: Set by user + + + The duration field can be estimated through various ways, and this field can be used to know how the duration was estimated. - encoding: unused - decoding: Read by user + + + Skip initial bytes when opening stream - encoding: unused - decoding: Set by user + + + Correct single timestamp overflows - encoding: unused - decoding: Set by user + + + Force seeking to any (also non key) frames. - encoding: unused - decoding: Set by user + + + Flush the I/O context after each packet. - encoding: Set by user - decoding: unused + + + format probing score. The maximal score is AVPROBE_SCORE_MAX, its set when the demuxer probes the format. - encoding: unused - decoding: set by avformat, read by user + + + number of bytes to read maximally to identify format. - encoding: unused - decoding: set by user + + + ',' separated list of allowed decoders. If NULL then all are allowed - encoding: unused - decoding: set by user + + + ',' separated list of allowed demuxers. If NULL then all are allowed - encoding: unused - decoding: set by user + + + An opaque field for libavformat internal usage. 
Must not be accessed in any way by callers. + + + IO repositioned flag. This is set by avformat when the underlaying IO context read pointer is repositioned, for example when doing byte based seeking. Demuxers can use the flag to detect such changes. + + + Forced video codec. This allows forcing a specific decoder, even when there are multiple with the same codec_id. Demuxing: Set by user + + + Forced audio codec. This allows forcing a specific decoder, even when there are multiple with the same codec_id. Demuxing: Set by user + + + Forced subtitle codec. This allows forcing a specific decoder, even when there are multiple with the same codec_id. Demuxing: Set by user + + + Forced data codec. This allows forcing a specific decoder, even when there are multiple with the same codec_id. Demuxing: Set by user + + + Number of bytes to be written as padding in a metadata header. Demuxing: Unused. Muxing: Set by user via av_format_set_metadata_header_padding. + + + User data. This is a place for some private data of the user. + + + Callback used by devices to communicate with application. + + + Output timestamp offset, in microseconds. Muxing: set by user + + + dump format separator. can be ", " or " " or anything else - muxing: Set by user. - demuxing: Set by user. + + + Forced Data codec_id. Demuxing: Set by user. + + + Called to open further IO contexts when needed for demuxing. + + + ',' separated list of allowed protocols. - encoding: unused - decoding: set by user + + + A callback for closing the streams opened with AVFormatContext.io_open(). + + + ',' separated list of disallowed protocols. - encoding: unused - decoding: set by user + + + The maximum number of streams. - encoding: unused - decoding: set by user + + + @{ + + + A comma separated list of short names for the format. New names may be appended with a minor bump. + + + Descriptive name for the format, meant to be more human-readable than name. You should use the NULL_IF_CONFIG_SMALL() macro to define it. 
+ + + Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH, AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS. + + + If extensions are defined, then no probe is done. You should usually not use extension format guessing because it is not reliable enough + + + AVClass for the private context + + + Comma-separated list of mime types. It is used check for matching mime types while probing. + + + *************************************************************** No fields below this line are part of the public API. They may not be used outside of libavformat and can be changed and removed at will. New public fields should be added right above. **************************************************************** + + + Raw demuxers store their codec ID here. + + + Size of private data so that it can be allocated in the wrapper. + + + Tell if a given file has a chance of being parsed as this format. The buffer provided is guaranteed to be AVPROBE_PADDING_SIZE bytes big so you do not have to check for that unless you need more. + + + Read the format header and initialize the AVFormatContext structure. Return 0 if OK. 'avformat_new_stream' should be called to create new streams. + + + Read one packet and put it in 'pkt'. pts and flags are also set. 'avformat_new_stream' can be called only if the flag AVFMTCTX_NOHEADER is used and only in the calling thread (not in a background thread). + + + Close the stream. The AVFormatContext and AVStreams are not freed by this function + + + Seek to a given timestamp relative to the frames in stream component stream_index. + + + Get the next timestamp in stream[stream_index].time_base units. + + + Start/resume playing - only meaningful if using a network-based format (RTSP). + + + Pause playing - only meaningful if using a network-based format (RTSP). + + + Seek to timestamp ts. 
Seeking will be done so that the point from which all active streams can be presented successfully will be closest to ts and within min/max_ts. Active streams are all streams that have AVStream.discard < AVDISCARD_ALL. + + + Returns device list with it properties. + + + Initialize device capabilities submodule. + + + Free device capabilities submodule. + + + List of devices. + + + list of autodetected devices + + + number of autodetected devices + + + index of default device or -1 if no default + + + Structure describes device capabilities. + + + Bytestream IO Context. New fields can be added to the end with minor version bumps. Removal, reordering and changes to existing fields require a major version bump. sizeof(AVIOContext) must not be used outside libav*. + + + A class for private options. + + + Start of the buffer. + + + Maximum buffer size + + + Current position in the buffer + + + End of the data, may be less than buffer+buffer_size if the read function returned less data than requested, e.g. for streams where no more data has been received yet. + + + A private pointer, passed to the read/write/seek/... functions. + + + position in the file of the current buffer + + + true if the next seek should flush + + + true if eof reached + + + true if open for writing + + + contains the error code or 0 if no error happened + + + Pause or resume playback for network streaming protocols - e.g. MMS. + + + Seek to a given timestamp in stream with the specified stream_index. Needed for some network streaming protocols which don't support seeking to byte position. + + + A combination of AVIO_SEEKABLE_ flags or 0 when the stream is not seekable. + + + max filesize, used to limit allocations This field is internal to libavformat and access from outside is not allowed. + + + avio_read and avio_write should if possible be satisfied directly instead of going through a buffer, and avio_seek will always call the underlying seek function directly. 
+ + + Bytes read statistic This field is internal to libavformat and access from outside is not allowed. + + + seek statistic This field is internal to libavformat and access from outside is not allowed. + + + writeout statistic This field is internal to libavformat and access from outside is not allowed. + + + Original buffer size used internally after probing and ensure seekback to reset the buffer size This field is internal to libavformat and access from outside is not allowed. + + + Threshold to favor readahead over seek. This is current internal only, do not use from outside. + + + ',' separated list of allowed protocols. + + + ',' separated list of disallowed protocols. + + + A callback that is used instead of write_packet. + + + If set, don't call write_data_type separately for AVIO_DATA_MARKER_BOUNDARY_POINT, but ignore them and treat them as AVIO_DATA_MARKER_UNKNOWN (to avoid needlessly small chunks of data returned from the callback). + + + Internal, not meant to be used from outside of AVIOContext. + + + A callback that is used instead of short_seek_threshold. This is current internal only, do not use from outside. + + + Callback for checking whether to abort blocking functions. AVERROR_EXIT is returned in this case by the interrupted function. During blocking operations, callback is called with opaque as parameter. If the callback returns 1, the blocking operation will be aborted. + + + Describes single entry of the directory. + + + Filename + + + Type of the entry + + + Set to 1 when name is encoded with UTF-8, 0 otherwise. Name can be encoded with UTF-8 even though 0 is set. + + + File size in bytes, -1 if unknown. + + + Time of last modification in microseconds since unix epoch, -1 if unknown. + + + Time of last access in microseconds since unix epoch, -1 if unknown. + + + Time of last status change in microseconds since unix epoch, -1 if unknown. + + + User ID of owner, -1 if unknown. + + + Group ID of owner, -1 if unknown. 
+ + + Unix file mode, -1 if unknown. + + + An instance of a filter + + + needed for av_log() and filters common options + + + the AVFilter of which this is an instance + + + name of this filter instance + + + array of input pads + + + array of pointers to input links + + + number of input pads + + + array of output pads + + + array of pointers to output links + + + number of output pads + + + private data for use by the filter + + + filtergraph this filter belongs to + + + Type of multithreading being allowed/used. A combination of AVFILTER_THREAD_* flags. + + + An opaque struct for libavfilter internal use. + + + enable expression string + + + parsed expression (AVExpr*) + + + variable values for the enable expression + + + the enabled state from the last expression evaluation + + + For filters which will create hardware frames, sets the device the filter should create them in. All other filters will ignore this field: in particular, a filter which consumes or processes hardware frames will instead use the hw_frames_ctx field in AVFilterLink to carry the hardware context information. + + + Max number of threads allowed in this filter instance. If <= 0, its value is ignored. Overrides global number of threads set per filter graph. + + + Ready status of the filter. A non-0 value means that the filter needs activating; a higher value suggests a more urgent activation. + + + Filter definition. This defines the pads a filter contains, and all the callback functions used to interact with the filter. + + + Filter name. Must be non-NULL and unique among filters. + + + A description of the filter. May be NULL. + + + List of inputs, terminated by a zeroed element. + + + List of outputs, terminated by a zeroed element. + + + A class for the private data, used to declare filter private AVOptions. This field is NULL for filters that do not declare any options. + + + A combination of AVFILTER_FLAG_* + + + Filter initialization function. 
+ + + Should be set instead of want to pass a dictionary of AVOptions to nested contexts that are allocated during init. + + + Filter uninitialization function. + + + Query formats supported by the filter on its inputs and outputs. + + + size of private data to allocate for the filter + + + Additional flags for avfilter internal use only. + + + Used by the filter registration system. Must not be touched by any other code. + + + Make the filter instance process a command. + + + Filter initialization function, alternative to the init() callback. Args contains the user-supplied parameters, opaque is used for providing binary data. + + + Filter activation function. + + + A link between two filters. This contains pointers to the source and destination filters between which this link exists, and the indexes of the pads involved. In addition, this link also contains the parameters which have been negotiated and agreed upon between the filter, such as image dimensions, format, etc. + + + source filter + + + output pad on the source filter + + + dest filter + + + input pad on the dest filter + + + filter media type + + + agreed upon image width + + + agreed upon image height + + + agreed upon sample aspect ratio + + + channel layout of current buffer (see libavutil/channel_layout.h) + + + samples per second + + + agreed upon media format + + + Define the time base used by the PTS of the frames/samples which will pass through this link. During the configuration stage, each filter is supposed to change only the output timebase, while the timebase of the input link is assumed to be an unchangeable property. + + + *************************************************************** All fields below this line are not part of the public API. They may not be used outside of libavfilter and can be changed and removed at will. New public fields should be added right above. 
**************************************************************** + + + Lists of channel layouts and sample rates used for automatic negotiation. + + + Audio only, the destination filter sets this to a non-zero value to request that buffers with the given number of samples should be sent to it. AVFilterPad.needs_fifo must also be set on the corresponding input pad. Last buffer before EOF will be padded with silence. + + + Graph the filter belongs to. + + + Current timestamp of the link, as defined by the most recent frame(s), in link time_base units. + + + Current timestamp of the link, as defined by the most recent frame(s), in AV_TIME_BASE units. + + + Index in the age array. + + + Frame rate of the stream on the link, or 1/0 if unknown or variable; if left to 0/0, will be automatically copied from the first input of the source filter if it exists. + + + Buffer partially filled with samples to achieve a fixed/minimum size. + + + Size of the partial buffer to allocate. Must be between min_samples and max_samples. + + + Minimum number of samples to filter at once. If filter_frame() is called with fewer samples, it will accumulate them in partial_buf. This field and the related ones must not be changed after filtering has started. If 0, all related fields are ignored. + + + Maximum number of samples to filter at once. If filter_frame() is called with more samples, it will split them. + + + Number of channels. + + + Link processing flags. + + + Number of past frames sent through the link. + + + Number of past frames sent through the link. + + + A pointer to a FFFramePool struct. + + + True if a frame is currently wanted on the output of this filter. Set when ff_request_frame() is called by the output, cleared when a frame is filtered. + + + For hwaccel pixel formats, this should be a reference to the AVHWFramesContext describing the frames. + + + Internal structure members. 
The fields below this limit are internal for libavfilter's use and must in no way be accessed by applications. + + + sws options to use for the auto-inserted scale filters + + + libavresample options to use for the auto-inserted resample filters + + + Type of multithreading allowed for filters in this graph. A combination of AVFILTER_THREAD_* flags. + + + Maximum number of threads used by filters in this graph. May be set by the caller before adding any filters to the filtergraph. Zero (the default) means that the number of threads is determined automatically. + + + Opaque object for libavfilter internal use. + + + Opaque user data. May be set by the caller to an arbitrary value, e.g. to be used from callbacks like Libavfilter will not touch this field in any way. + + + This callback may be set by the caller immediately after allocating the graph and before adding any filters to it, to provide a custom multithreading implementation. + + + swr options to use for the auto-inserted aresample filters, Access ONLY through AVOptions + + + Private fields + + + A linked-list of the inputs/outputs of the filter chain. + + + unique name for this input/output in the list + + + filter context associated to this input/output + + + index of the filt_ctx pad to use for linking + + + next input/input in the list, NULL if this is the last + + + This structure contains the parameters describing the frames that will be passed to this filter. + + + video: the pixel format, value corresponds to enum AVPixelFormat audio: the sample format, value corresponds to enum AVSampleFormat + + + The timebase to be used for the timestamps on the input frames. + + + Video only, the display dimensions of the input frames. + + + Video only, the display dimensions of the input frames. + + + Video only, the sample (pixel) aspect ratio. + + + Video only, the frame rate of the input video. 
This field must only be set to a non-zero value if input stream has a known constant framerate and should be left at its initial value if the framerate is variable or unknown. + + + Video with a hwaccel pixel format only. This should be a reference to an AVHWFramesContext instance describing the input frames. + + + Audio only, the audio sampling rate in samples per secon. + + + Audio only, the audio channel layout + + + Struct to use for initializing a buffersink context. + + + list of allowed pixel formats, terminated by AV_PIX_FMT_NONE + + + Struct to use for initializing an abuffersink context. + + + list of allowed sample formats, terminated by AV_SAMPLE_FMT_NONE + + + list of allowed channel layouts, terminated by -1 + + + list of allowed channel counts, terminated by -1 + + + if not 0, accept any channel count or layout + + + list of allowed sample rates, terminated by -1 + + + Structure describes basic parameters of the device. + + + device name, format depends on device + + + human friendly name + + + x coordinate of top left corner + + + y coordinate of top left corner + + + width + + + height + + + diff --git a/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/References/ffme.dll b/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/References/ffme.dll new file mode 100644 index 0000000..831d4b0 Binary files /dev/null and b/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/References/ffme.dll differ diff --git a/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/References/ffme.xml b/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/References/ffme.xml new file mode 100644 index 0000000..e3cd18b --- /dev/null +++ b/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/References/ffme.xml @@ -0,0 +1,6771 @@ + + + + ffme + + + + + Implements the logic to close a media stream. + + + + + + Initializes a new instance of the class. + + The media element. + + + + Executes this command. + + + + + Implements the logic to open a media stream. + + + + + + Initializes a new instance of the class. 
+ + The manager. + The source. + + + + Gets the source uri of the media stream. + + + + + Performs the actions that this command implements. + + + + + Creates a new instance of the renderer of the given type. + + Type of the media. + The renderer that was created + mediaType has to be of a vild type + + + + Implements the logic to pause the media stream + + + + + + Initializes a new instance of the class. + + The manager. + + + + Performs the actions that this command implements. + + + + + Implements the logic to start or resume media playback + + + + + + Initializes a new instance of the class. + + The media element. + + + + Performs the actions that this command implements. + + + + + Represents a command to be executed against an intance of the MediaElement + + + + + Set when the command has finished execution. + Do not use this field directly. It is managed internally by the command manager. + + + + + Initializes a new instance of the class. + + The command manager. + Type of the command. + + + + Gets the associated parent command manager + + + + + Gets the type of the command. + + + + + Gets a value indicating whether this command is marked as completed. + + + + + Marks the command as completed. + + + + + Executes the code for the command + + + + + Performs the actions that this command implements. + + + + + Represents a singlo point of contact for media command excution. + + + + + Initializes a new instance of the class. + + The media element. + + + + Gets the number of commands pending execution. + + + + + Gets the parent media element. + + + + + Opens the specified URI. + The command is processed in a Thread Pool Thread. + + The URI. + + + + Starts playing the open media URI. + + + + + Pauses the media. + + + + + Pauses and rewinds the media + + + + + Seeks to the specified position within the media. + + The position. + + + + Closes the specified media. + This command gets processed in a threadpool thread. + + + + + Sets the playback speed ratio. 
+ + The target speed ratio. + + + + Processes the next command in the command queue. + This method is called in every block rendering cycle. + + + + + Gets the pending count of the given command type. + + The t. + The amount of commands of the given type + + + + Enqueues the command for execution. + + The command. + + + + Waits for the command to complete execution. + + The command. + + + + Calls the execution of the given command instance + and wait for its completion without blocking the dispatcher + + The command. + + + + Enumerates the different available Media Command Types + + + + + The open command + + + + + The seek command + + + + + The play command + + + + + The pause command + + + + + The stop command + + + + + The close command + + + + + The set speed ratio command + + + + + Implements the logic to seek on the media stream + + + + + + Initializes a new instance of the class. + + The media element. + The target position. + + + + Gets or sets the target position. + + + The target position. + + + + + Performs the actions that this command implements. + + + + + A command to change speed ratio asynchronously + + + + + + Initializes a new instance of the class. + + The manager. + The speed ratio. + + + + The target speed ratio + + + + + Performs the actions that this command implements. + + + + + Implements the logic to pause and rewind the media stream + + + + + + Initializes a new instance of the class. + + The media element. + + + + Performs the actions that this command implements. + + + + + Fast, atomioc boolean combining interlocked to write value and volatile to read values + Idea taken from Memory model and .NET operations in article: + http://igoro.com/archive/volatile-keyword-in-c-memory-model-explained/ + + + + + Initializes a new instance of the class. 
+ + + + + Gets the latest value written by any of the processors in the machine + Setting + + + + + Fast, atomioc double combining interlocked to write value and volatile to read values + Idea taken from Memory model and .NET operations in article: + http://igoro.com/archive/volatile-keyword-in-c-memory-model-explained/ + + + + + Initializes a new instance of the class. + + + + + Gets or sets the latest value written by any of the processors in the machine + + + + + Fast, atomioc long combining interlocked to write value and volatile to read values + Idea taken from Memory model and .NET operations in article: + http://igoro.com/archive/volatile-keyword-in-c-memory-model-explained/ + + + + + Initializes a new instance of the class. + + + + + Gets or sets the latest value written by any of the processors in the machine + + + + + Manual additions to API calls not available in FFmpeg.Autogen + + + + + Gets the FFmpeg error mesage based on the error code + + The code. + The decoded error message + + + + A reference counter to keep track of unmanaged objects + + + + + The synchronization lock + + + + + The current reference counter instance + + + + + The instances + + + + + The types of tracked unmanaged types + + + + + The packet + + + + + The frame + + + + + The filter graph + + + + + The SWR context + + + + + The codec context + + + + + The SWS context + + + + + Gets the singleton instance of the reference counter + + + + + Gets the number of instances by location. + + + + + Adds the specified unmanaged object reference. + + The t. + The r. + The location. + + + + Removes the specified unmanaged object reference + + The PTR. + + + + Removes the specified unmanaged object reference. + + The unmanaged object reference. + + + + Adds the specified packet. + + The packet. + The location. + + + + Adds the specified context. + + The context. + The location. + + + + Adds the specified context. + + The context. + The location. + + + + Adds the specified codec. + + The codec. 
+ The location. + + + + Adds the specified frame. + + The frame. + The location. + + + + Adds the specified filtergraph. + + The filtergraph. + The location. + + + + A reference entry + + + + + Represents a generic Logger + + The sender's concrete type + + + + + Initializes a new instance of the class. + + The sender. + + + + Holds a reference to the sender. + + + + + Logs the specified message. + + Type of the message. + The message. + + + + A very simple and standard interface for message logging + + + + + Logs the specified message of the given type. + + Type of the message. + The message. + + + + Represents a very simple dictionary for MediaType keys + + The type of the value. + + + + Initializes a new instance of the class. + + + + + Gets or sets the item with the specified key. + return the default value of the value type when the key does not exist. + + The key. + The item + + + + FFmpeg Registration Native Methods + + + + + Sets the DLL directory in which external dependencies can be located. + + the full path. + True if set, false if not set + + + + Fast pointer memory block copy function + + The destination. + The source. + The length. + + + + Fills the memory. + + The destination. + The length. + The fill. + + + + Provides helpers tor un code in different modes on the UI dispatcher. + + + + + Gets the UI dispatcher. + + + + + Synchronously invokes the given instructions on the main application dispatcher. + + The priority. + The action. + + + + Enqueues the given instructions with the given arguments on the main application dispatcher. + This is a way to execute code in a fire-and-forget style + + The priority. + The action. + The arguments. + + + + Exits the execution frame. + + The f. + Always a null value + + + + A fixed-size buffer that acts as an infinite length one. + This buffer is backed by unmanaged, very fast memory so ensure you call + the dispose method when you are donde using it. + + + + + + The locking object to perform synchronization. 
+ + + + + To detect redundant calls + + + + + The unbmanaged buffer + + + + + Initializes a new instance of the class. + + Length of the buffer. + + + + Finalizes an instance of the class. + + + + + Gets the capacity of this buffer. + + + + + Gets the current, 0-based read index + + + + + Gets the maximum rewindable amount of bytes. + + + + + Gets the current, 0-based write index. + + + + + Gets an the object associated with the last write + + + + + Gets the available bytes to read. + + + + + Gets the number of bytes that can be written. + + + + + Gets percentage of used bytes (readbale/available, from 0.0 to 1.0). + + + + + Skips the specified amount requested bytes to be read. + + The requested bytes. + When requested bytes GT readable count + + + + Rewinds the read position by specified requested amount of bytes. + + The requested bytes. + When requested GT rewindable + + + + Reads the specified number of bytes into the target array. + + The requested bytes. + The target. + The target offset. + When requested GT readble + + + + Writes data to the backing buffer using the specified pointer and length. + and associating a write tag for this operation. + + The source. + The length. + The write tag. + if set to true, overwrites the data even if it has not been read. + Read + When read needs to be called more! + + + + Resets all states as if this buffer had just been created. + + + + + Performs application-defined tasks associated with freeing, releasing, or resetting unmanaged resources. + + + + + Releases unmanaged and - optionally - managed resources. + + true to release both managed and unmanaged resources; false to release only unmanaged resources. + + + + A time measurement artifact. + + + + + Initializes a new instance of the class. + The clock starts poaused and at the 0 position. + + + + + Gets or sets the clock position. + + + + + Gets a value indicating whether the clock is running. + + + + + Gets or sets the speed ratio at which the clock runs. 
+ + + + + Starts or resumes the clock. + + + + + Pauses the clock. + + + + + Sets the clock position to 0 and stops it. + The speed ratio is not modified. + + + + + Defines library-wide constants + + + + + Determines if the av_lockmgr_register is called. + If this is set to false, then the number of threads will be set to 1. + + + + + Contains audio format properties essential + to audio resampling + + + + + The standard output audio spec + + + + + Initializes static members of the class. + + + + + Prevents a default instance of the class from being created. + + + + + Initializes a new instance of the class. + + The frame. + + + + Gets the channel count. + + + + + Gets the channel layout. + + + + + Gets the samples per channel. + + + + + Gets the audio sampling rate. + + + + + Gets the sample format. + + + + + Gets the length of the buffer required to store + the samples in the current format. + + + + + Creates a source audio spec based on the info in the given audio frame + + The frame. + The audio parameters + + + + Creates a target audio spec using the sample quantities provided + by the given source audio frame + + The frame. + The audio parameters + + + + Determines if the audio specs are compatible between them. + They must share format, channel count, layout and sample rate + + a. + The b. + True if the params are compatible, flase otherwise. + + + + A single codec option along with a stream specifier. + + + + + Initializes a new instance of the class. + + The spec. + The key. + The value. + + + + Gets or sets the stream specifier. + + + + + Gets or sets the option name + + + + + Gets or sets the option value. + + + + + Enumerates the different Media Types + + + + + Represents an unexisting media type (-1) + + + + + The video media type (0) + + + + + The audio media type (1) + + + + + The subtitle media type (3) + + + + + An AVDictionaryEntry wrapper + + + + + Initializes a new instance of the class. + + The entry pointer. + + + + Gets the key. 
+ + + + + Gets the value. + + + + + An AVDictionary management class + + + + + To detect redundant Dispose calls + + + + + Initializes a new instance of the class. + + + + + Initializes a new instance of the class. + + The other. + + + + Gets the number of elements in the dictionary + + + The count. + + + + + Gets or sets the value with the specified key. + + + The . + + The key. + The entry + + + + Converts the AVDictionary to a regular dictionary. + + The dictionary to convert from. + the converterd dictionary + + + + A wrapper for the av_dict_get method + + The dictionary. + The key. + if set to true [match case]. + The Entry + + + + Fills this dictionary with a set of options + + The other dictionary (source) + + + + Gets the first entry. Null if no entries. + + The entry + + + + Gets the next entry based on the provided prior entry. + + The prior entry. + The entry + + + + Determines if the given key exists in the dictionary + + The key. + if set to true [match case]. + True or False + + + + Gets the entry given the key. + + The key. + if set to true [match case]. + The entry + + + + Gets the value with specified key. + + The key. + The value + + + + Sets the value for the specified key. + + The key. + The value. + + + + Sets the value for the specified key. + + The key. + The value. + if set to true [dont overwrite]. + + + + Removes the entry with the specified key. + + The key. + + + + Performs application-defined tasks associated with freeing, releasing, or resetting unmanaged resources. + + + + + Releases unmanaged and - optionally - managed resources. + + true to release both managed and unmanaged resources; false to release only unmanaged resources. + + + + A managed representation of an FFmpeg stream specifier + + + + + Initializes a new instance of the class. + + + + + Initializes a new instance of the class. + + The stream identifier. + streamId + + + + Initializes a new instance of the class. + + Type of the media. 
+ streamType + + + + Initializes a new instance of the class. + + Type of the media. + The stream identifier. + + streamType + or + streamId + + + + + Provides suffixes for the different media types. + + + + + Gets the stream identifier. + + + + + Gets the stream suffix. + + + + + Returns a that represents this stream specifier. + + + A that represents this instance. + + + + + Provides a set of utilities to perfrom logging, text formatting, + conversion and other handy calculations. + + + + + Initializes static members of the class. + + + + + Determines if we are currently in Design Time + + + true if this instance is in design time; otherwise, false. + + + + + Gets a value indicating whether this instance is in debug mode. + + + + + Gets the assembly location. + + + + + Converts a byte pointer to a string + + The byte PTR. + The string + + + + Converts a byte pointer to a UTF8 encoded string. + + The byte PTR. + The string + + + + Converts the given value to a value that is of the given multiple. + + The value. + The multiple. + The value + + + + Gets a timespan given a timestamp and a timebase. + + The PTS. + The time base. + The TimeSpan + + + + Gets a timespan given a timestamp and a timebase. + + The PTS. + The time base. + The TimeSpan + + + + Gets a timespan given a timestamp and a timebase. + + The PTS in seconds. + The time base. + The TimeSpan + + + + Gets a timespan given a timestamp and a timebase. + + The PTS. + The time base. + The TimeSpan + + + + Gets a timespan given a timestamp (in AV_TIME_BASE units) + + The PTS. + The TimeSpan + + + + Gets a timespan given a timestamp (in AV_TIME_BASE units) + + The PTS. + The TimeSpan + + + + Converts a fraction to a double + + The rational. + The value + + + + Registers FFmpeg library and initializes its components. + It only needs to be called once but calling it more than once + has no effect. Returns the path that FFmpeg was registered from. + + The override path. 
+ Returns the path that FFmpeg was registered from. + When the folder is not found + + + + Logs the specified message. + + The sender. + Type of the message. + The message. + sender + + + + Logs a block rendering operation as a Trace Message + if the debugger is attached. + + The media element. + The block. + The clock position. + Index of the render. + + + + Returns a formatted timestamp string in Seconds + + The ts. + The formatted string + + + + Returns a formatted string with elapsed milliseconds between now and + the specified date. + + The dt. + The formatted string + + + + Returns a fromatted string, dividing by the specified + factor. Useful for debugging longs with byte positions or sizes. + + The ts. + The divide by. + The formatted string + + + + Strips the SRT format and returns plain text. + + The input. + The formatted string + + + + Strips a line of text from the ASS format. + + The input. + The formatted string + + + + Handles the Tick event of the LogOutputter timer. + + The source of the event. + The instance containing the event data. + + + + Manages FFmpeg Multithreaded locking + + The mutex. + The op. + + 0 for success, 1 for error + + + + + Log message callback from ffmpeg library. + + The p0. + The level. + The format. + The vl. 
+ + + + Enumerates the differen Closed-Captioning Colors + + + + + No color + + + + + The white color + + + + + The white transparent color + + + + + The green color + + + + + The green transparent color + + + + + The blue color + + + + + The blue transparent color + + + + + The cyan color + + + + + The cyan transparent color + + + + + The red color + + + + + The red transparent color + + + + + The yellow color + + + + + The yellow transparent color + + + + + The magenta color + + + + + The magenta transparent color + + + + + The white italics color + + + + + The white italics transparent color + + + + + The background transparent color + + + + + The foreground black color + + + + + The foreground black underline color + + + + + Enumerates the Closed-Captioning misc commands + + + + + No command + + + + + The resume command + + + + + The backspace command + + + + + The alarm off command + + + + + The alarm on command + + + + + The clear line command + + + + + The roll up2 command + + + + + The roll up3 command + + + + + The roll up4 command + + + + + The start caption command + + + + + The star non caption command + + + + + The resume non caption command + + + + + The clear screen command + + + + + The new line command + + + + + The clear buffer command + + + + + The end caption command + + + + + Defines Closed-Captioning Packet types + + + + + The unrecognized packet type + + + + + The null pad packet type + + + + + The XDS class packet type + + + + + The misc command packet type + + + + + The text packet type + + + + + The mid row packet type + + + + + The preamble packet type + + + + + The color packet type + + + + + The charset packet type + + + + + The tabs packet type + + + + + Enumerates the differen Closed-Captioning Styles + + + + + The none style + + + + + The white style + + + + + The white underline style + + + + + The green style + + + + + The green underline style + + + + + The blue style + + + + + The blue underline style + + + + + The cyan style + + 
+ + + The cyan underline style + + + + + The red style + + + + + The red underline style + + + + + The yellow style + + + + + The yellow underline style + + + + + The magenta style + + + + + The magenta underline style + + + + + The white italics style + + + + + The white italics underline style + + + + + The white indent0 style + + + + + The white indent0 underline style + + + + + The white indent4 style + + + + + The white indent4 underline style + + + + + The white indent8 style + + + + + The white indent8 underline style + + + + + The white indent12 style + + + + + The white indent12 underline style + + + + + The white indent16 style + + + + + The white indent16 underline style + + + + + The white indent20 style + + + + + The white indent20 underline style + + + + + The white indent24 style + + + + + The white indent24 underline style + + + + + The white indent28 style + + + + + The white indent28 underline style + + + + + Defines Closed-Captioning XDS Packet Classes + + + + + The none XDS Class + + + + + The current start XDS Class + + + + + The current continue XDS Class + + + + + The future start XDS Class + + + + + The future continue XDS Class + + + + + The channel start XDS Class + + + + + The channel continue XDS Class + + + + + The misc start XDS Class + + + + + The misc continue XDS Class + + + + + The public service start XDS Class + + + + + The public service continue XDS Class + + + + + The reserved start XDS Class + + + + + The reserved continue XDS Class + + + + + The private start XDS Class + + + + + The private continue XDS Class + + + + + The end all XDS Class + + + + + Represents a set of Closed Captioning Tracks + in a stream of CC packets. + + + + + The CC1 Track Packets + + + + + The CC2 Track Packets + + + + + The CC3 Track Packets + + + + + The CC4 Track Packets + + + + + Adds the specified packet and automatically places it on the right track. + If the track requires sorting it does so by reordering packets based on their timestamp. 
+ + The item. + + + + Represents a 3-byte packet of closed-captioning data in EIA-608 format. + See: http://jackyjung.tistory.com/attachment/499e14e28c347DB.pdf + + + + + Holds the data bytes + + + + + Initializes a new instance of the class. + + The timestamp. + The source. + The offset. + + + + Initializes a new instance of the class. + + The timestamp. + The header. + The d0. + The d1. + + + + Gets the first of the two-byte packet data + + + + + Gets the second of the two-byte packet data + + + + + Gets the timestamp this packet applies to. + + + + + Gets the NTSC field (1 or 2). + 0 for unknown/null packet + + + + + Gets the channel. 0 for any, 1 or 2 for specific channel toggle. + 0 just means to use what a prior packet had specified. + + + + + Gets the type of the packet. + + + + + Gets the number of tabs, if the packet type is of Tabs + + + + + Gets the Misc Command, if the packet type is of Misc Command + + + + + Gets the Color, if the packet type is of Color + + + + + Gets the Style, if the packet type is of Mid Row Style + + + + + Gets the XDS Class, if the packet type is of XDS + + + + + Gets the Preamble Row Number (1 through 15), if the packet type is of Preamble + + + + + Gets the Style, if the packet type is of Preamble + + + + + Gets the text, if the packet type is of text. + + + + + Returns a that represents this instance. + + + A that represents this instance. + + + + + Compares the current instance with another object of the same type and returns an integer that indicates whether the current instance precedes, follows, or occurs in the same position in the sort order as the other object. + + An object to compare with this instance. + + A value that indicates the relative order of the objects being compared. The return value has these meanings: Value Meaning Less than zero This instance precedes in the sort order. Zero This instance occurs in the same position in the sort order as . Greater than zero This instance follows in the sort order. 
+ + + + + Checks that the header byte starts with 11111b (5 ones binary) + + The data. + If header has markers + + + + Determines whether the valid flag of the header byte is set. + + The data. + + true if [is header valid falg set] [the specified data]; otherwise, false. + + + + + Gets the NTSC field type (1 or 2). + Returns 0 for unknown. + + The data. + The field type + + + + Determines whether the data is null padding + + The d0. + The d1. + + true if [is empty channel data] [the specified d0]; otherwise, false. + + + + + Drops the parity bit from the data byte. + + The input. + The byte without a parity bit. + + + + Converst an ASCII character code to an EIA-608 char (in Unicode) + + The input. + The charset char. + + + + The get format callback + + + + + Initializes static members of the class. + + + + + Prevents a default instance of the class from being created. + + + + + A dicitionary containing all Accelerators by pixel format + + + + + Gets the dxva2 accelerator. + + + + + Gets the hardware output pixel format. + + + + + Gets the type of the hardware device. + + + + + Attaches a hardware device context to the specified video component. + + The component. + Throws when unable to initialize the hardware device + + + + Detaches and disposes the hardware device context from the specified video component + + The component. + + + + Downloads the frame from the hardware into a software frame if possible. + The input hardware frame gets freed and the return value will point to the new software frame + + The codec context. + The input. + The frame downloaded from the device into RAM + Failed to transfer data to output frame + + + + Gets the pixel format. + Port of (get_format) method in ffmpeg.c + + The codec context. + The pixel formats. + The real pixel format that the codec will be using + + + + Enumerates the seek target requirement levels. + + + + + Seek requirement is satisfied when + the main component has frames in the seek range. 
+ This is the fastest option. + + + + + Seek requirement is satisfied when + the both audio and video comps have frames in the seek range. + This is the recommended option. + + + + + Seek requirement is satisfied when + ALL components have frames in the seek range + This is NOT recommended as it forces large amounts of + frames to get decoded in subtitle files. + + + + + A scaled, preallocated audio frame container. + The buffer is in 16-bit signed, interleaved sample data + + + + + Finalizes an instance of the class. + + + + + Gets a pointer to the first byte of the data buffer. + The format signed 16-bits per sample, channel interleaved + + + + + Gets the length of the buffer in bytes. + + + + + Gets the sample rate. + + + + + Gets the channel count. + + + + + Gets the available samples per channel. + + + + + Gets the media type of the data + + + + + The picture buffer length of the last allocated buffer + + + + + Holds a reference to the last allocated buffer + + + + + Performs application-defined tasks associated with freeing, releasing, or resetting unmanaged resources. + + + + + Releases unmanaged and - optionally - managed resources. + + true to release both managed and unmanaged resources; false to release only unmanaged resources. + + + + Represents a wrapper from an unmanaged FFmpeg audio frame + + + + + + + Initializes a new instance of the class. + + The frame. + The component. + + + + Finalizes an instance of the class. + + + + + Gets the type of the media. + + + + + Gets the pointer to the unmanaged frame. + + + + + Releases unmanaged and - optionally - managed resources. + + + + + Releases unmanaged and - optionally - managed resources. + + true to release both managed and unmanaged resources; false to release only unmanaged resources. + + + + Represents a set of preallocated media blocks of the same media type. + A block buffer contains playback and pool blocks. Pool blocks are blocks that + can be reused. 
Playback blocks are blocks that have been filled. + This class is thread safe. + + + + + The blocks that are available to be filled. + + + + + The blocks that are available for rendering. + + + + + Initializes a new instance of the class. + + The capacity. + Type of the media. + + + + Gets the media type of the block buffer. + + + + + Gets the start time of the first block. + + + + + Gets the end time of the last block. + + + + + Gets the range of time between the first block and the end time of the last block. + + + + + Gets the average duration of the currently available playback blocks. + + + + + Gets a value indicating whether all the durations of the blocks are equal + + + + + Gets the number of available playback blocks. + + + + + Gets the maximum count of this buffer. + + + + + Gets the usage percent from 0.0 to 1.0 + + + + + Gets a value indicating whether the playback blocks are all allocated. + + + + + Gets the at the specified index. + + + The . + + The index. + The media block + + + + Gets the at the specified timestamp. + + + The . + + At time. + The media block + + + + Gets the percentage of the range for the given time position. + + The position. + The percent of the range + + + + Retrieves the block following the provided current block + + The current block. + The next media block + + + + Adds a block to the playback blocks by converting the given frame. + If there are no more blocks in the pool, the oldest block is returned to the pool + and reused for the new block. The source frame is automatically disposed. + + The source. + The container. + The filled block. + + + + Clears all the playback blocks returning them to the + block pool. + + + + + Determines whether the given render time is within the range of playback blocks. + + The render time. + + true if [is in range] [the specified render time]; otherwise, false. + + + + + Retrieves the index of the playback block corresponding to the specified + render time. 
This uses very fast binary and linear search commbinations. + If there are no playback blocks it returns -1. + If the render time is greater than the range end time, it returns the last playback block index. + If the render time is less than the range start time, it returns the first playback block index. + + The render time. + The media block's index + + + + Performs application-defined tasks associated with freeing, releasing, or resetting unmanaged resources. + + + + + Returns a formatted string with information about this buffer + + The formatted string + + + + Block factory method. + + The media frame + MediaBlock + + + + Provides audio sample extraction, decoding and scaling functionality. + + + + + + Holds a reference to the audio resampler + This resampler gets disposed upon disposal of this object. + + + + + Used to determine if we have to reset the scaler parameters + + + + + Initializes a new instance of the class. + + The container. + Index of the stream. + + + + Gets the number of audio channels. + + + + + Gets the audio sample rate. + + + + + Gets the bits per sample. + + + + + Converts decoded, raw frame data in the frame source into a a usable frame.
+ The process includes performing picture, samples or text conversions + so that the decoded source frame data is easily usable in multimedia applications +
+ The source frame to use as an input. + The target frame that will be updated with the source frame. If null is passed the frame will be instantiated. + The sibling blocks that may help guess some additional parameters for the input frame. + + Return the updated output frame + + input +
+ + + Creates a frame source object given the raw FFmpeg frame reference. + + The raw FFmpeg frame pointer. + The media frame + + + + Releases unmanaged and - optionally - managed resources. + + true to release both managed and unmanaged resources; false to release only unmanaged resources. + + + + Destroys the filtergraph releasing unmanaged resources. + + + + + Computes the frame filter arguments that are appropriate for the audio filtering chain. + + The frame. + The base filter arguments + + + + If necessary, disposes the existing filtergraph and creates a new one based on the frame arguments. + + The frame. + + avfilter_graph_create_filter + or + avfilter_graph_create_filter + or + avfilter_link + or + avfilter_graph_parse + or + avfilter_graph_config + + + + + Represents a wrapper for an unmanaged frame. + Derived classes implement the specifics of each media type. + + + + + + Initializes a new instance of the class. + + The pointer. + The component. + + + + Gets the type of the media. + + + The type of the media. + + + + + Gets the start time of the frame. + + + + + Gets the end time of the frame + + + + + Gets the index of the stream from which this frame was decoded. + + + + + Gets the amount of time this data has to be presented + + + + + Gets or sets a value indicating whether this frame obtained its start time + form a valid frame pts value + + + + + When the unmanaged frame is released (freed from unmanaged memory) + this property will return true. + + + + + Gets the time base of the stream that generated this frame. + + + + + Compares the current instance with another object of the same type and returns an integer that indicates whether the current instance precedes, follows, or occurs in the same position in the sort order as the other object. + + An object to compare with this instance. + + A value that indicates the relative order of the objects being compared. 
The return value has these meanings: Value Meaning Less than zero This instance precedes in the sort order. Zero This instance occurs in the same position in the sort order as . Greater than zero This instance follows in the sort order. + + + + + Performs application-defined tasks associated with freeing, releasing, or resetting unmanaged resources. + + + + + A base class for blocks of the deifferent MediaTypes. + Blocks are the result of decoding and scaling a frame. + Blocks have preallocated buffers wich makes them memory and CPU efficient + Reue blocks as much as possible. Once you create a block from a frame, + you don't need the frame anymore so make sure you dispose the frame. + + + + + Gets the media type of the data + + + + + Gets or sets a value indicating whether the start time was guessed from siblings + or the source frame PTS comes from a NO PTS value + + + + + Gets the time at which this data should be presented (PTS) + + + + + Gets the amount of time this data has to be presented + + + + + Gets the end time. + + + + + Gets or sets the index of the stream. + + + + + Gets the middle timestamp between the start and end time. + Returns Zero if the duration is Zero or negative. + + + + + Determines whether this media block holds the specified position. + Returns false if it does not have a valid duration. + + The position. + + true if [contains] [the specified position]; otherwise, false. + + + + + Compares the current instance with another object of the same type and returns an integer that indicates whether the current instance precedes, follows, or occurs in the same position in the sort order as the other object. + + An object to compare with this instance. + + A value that indicates the relative order of the objects being compared. The return value has these meanings: Value Meaning Less than zero This instance precedes in the sort order. Zero This instance occurs in the same position in the sort order as . 
Greater than zero This instance follows in the sort order. + + + + + Performs application-defined tasks associated with freeing, releasing, or resetting unmanaged resources. + + + + + Represents a media component of a given media type within a + media container. Derived classes must implement frame handling + logic. + + + + + + Holds a reference to the Codec Context. + + + + + Holds a reference to the associated input context stream + + + + + Contains the packets pending to be sent to the decoder + + + + + The packets that have been sent to the decoder. We keep track of them in order to dispose them + once a frame has been decoded. + + + + + Detects redundant, unmanaged calls to the Dispose method. + + + + + The m total bytes read + + + + + Initializes a new instance of the class. + + The container. + Index of the stream. + container + The container exception. + + + + Finalizes an instance of the class. + + + + + Gets the media container associated with this component. + + + + + Gets the type of the media. + + + + + Gets the index of the associated stream. + + + + + Returns the component's stream start timestamp as reported + by the start time of the stream. + + + + + Gets the duration of this stream component. + If there is no such information it will return TimeSpan.MinValue + + + + + Gets the current length in bytes of the + packet buffer. Limit your Reads to something reasonable before + this becomes too large. + + + + + Gets the number of packets in the queue. + Decode packets until this number becomes 0. + + + + + Gets the total amount of bytes read by this component. + + + + + Gets the ID of the codec for this component. + + + + + Gets the name of the codec for this component. + + + + + Gets the bitrate of this component as reported by the codec context. + Returns 0 for unknown. + + + + + Gets the stream information. + + + + + Clears the pending and sent Packet Queues releasing all memory held by those packets. 
+ Additionally it flushes the codec buffered packets. + + + + + Sends a special kind of packet (an empty packet) + that tells the decoder to enter draining mode. + + + + + Pushes a packet into the decoding Packet Queue + and processes the packet in order to try to decode + 1 or more frames. The packet has to be within the range of + the start time and end time of + + The packet. + + + + Decodes the next packet in the packet queue in this media component. + Returns the decoded frames. + + The received Media Frames + + + + Converts decoded, raw frame data in the frame source into a a usable frame.
+ The process includes performing picture, samples or text conversions + so that the decoded source frame data is easily usable in multimedia applications +
+ The source frame to use as an input. + The target frame that will be updated with the source frame. If null is passed the frame will be instantiated. + The sibling blocks that may help guess some additional parameters for the input frame. + + Return the updated output frame + +
+ + + Performs application-defined tasks associated with freeing, releasing, or resetting unmanaged resources. + + + + + Determines whether the specified packet is a Null Packet (data = null, size = 0) + These null packets are used to read multiple frames from a single packet. + + The packet. + + true if [is empty packet] [the specified packet]; otherwise, false. + + + + + Creates a frame source object given the raw FFmpeg subtitle reference. + + The raw FFmpeg subtitle pointer. + The media frame + + + + Creates a frame source object given the raw FFmpeg frame reference. + + The raw FFmpeg frame pointer. + The media frame + + + + Releases the existing codec context and clears and disposes the packet queues. + + + + + Releases unmanaged and - optionally - managed resources. + + true to release both managed and unmanaged resources; false to release only unmanaged resources. + + + + Receives 0 or more frames from the next available packet in the Queue. + This sends the first available packet to dequeue to the decoder + and uses the decoded frames (if any) to their corresponding + ProcessFrame method. + + The list of frames + + + + Represents a set of Audio, Video and Subtitle components. + This class is useful in order to group all components into + a single set. Sending packets is automatically handled by + this class. This class is thread safe. + + + + + The internal Components + + + + + The synchronize lock + + + + + Provides a cached array to the components backing the All property. + + + + + To detect redundant Dispose calls + + + + + Initializes a new instance of the class. + + + + + Gets the available component media types. + + + + + Gets all the components in a read-only collection. + + + + + Gets the main media component of the stream to which time is synchronized. + By order of priority, first Audio, then Video + + + + + Gets the video component. + Returns null when there is no such stream component. + + + + + Gets the audio component. 
+ Returns null when there is no such stream component. + + + + + Gets the subtitles component. + Returns null when there is no such stream component. + + + + + Gets the current length in bytes of the packet buffer. + These packets are the ones that have not been yet deecoded. + + + + + Gets the number of packets that have not been + fed to the decoders. + + + + + Gets the total bytes read by all components. + + + + + Gets a value indicating whether this instance has a video component. + + + + + Gets a value indicating whether this instance has an audio component. + + + + + Gets a value indicating whether this instance has a subtitles component. + + + + + Gets or sets the with the specified media type. + Setting a new component on an existing media type component will throw. + Getting a non existing media component fro the given media type will return null. + + Type of the media. + The media component + When the media type is invalid + MediaComponent + + + + Removes the component of specified media type (if registered). + It calls the dispose method of the media component too. + + Type of the media. + + + + Performs application-defined tasks associated with freeing, releasing, or resetting unmanaged resources. + + + + + Sends the specified packet to the correct component by reading the stream index + of the packet that is being sent. No packet is sent if the provided packet is set to null. + Returns the media type of the component that accepted the packet. + + The packet. + The media type + + + + Sends an empty packet to all media components. + When an EOF/EOS situation is encountered, this forces + the decoders to enter drainig mode untill all frames are decoded. + + + + + Clears the packet queues for all components. + Additionally it flushes the codec buffered packets. + This is useful after a seek operation is performed or a stream + index is changed. + + + + + Releases unmanaged and - optionally - managed resources. 
+ + true to release both managed and unmanaged resources; false to release only unmanaged resources. + + + + A subtitle frame container. Simply contains text lines. + + + + + Gets the media type of the data + + + + + Gets the lines of text for this subtitle frame with all formatting stripped out. + + + + + Gets the original text in SRT or ASS fromat. + + + + + Gets the type of the original text. + Returns None when it's a bitmap or when it's None + + + + + Performs application-defined tasks associated with freeing, releasing, or resetting unmanaged resources. + + + + + Represents a wrapper for an unmanaged Subtitle frame. + TODO: Only text (ASS and SRT) subtitles are supported currently. + There is no support to bitmap subtitles. + + + + + + Initializes a new instance of the class. + + The frame. + The component. + + + + Finalizes an instance of the class. + + + + + Gets the type of the media. + + + + + Gets lines of text that the subtitle frame contains. + + + + + Gets the type of the text. + + + The type of the text. + + + + + Gets the pointer to the unmanaged subtitle struct + + + + + Releases unmanaged and - optionally - managed resources. + + + + + Allocates an AVSubtitle struct in unmanaged memory, + + The subtitle struct pointer + + + + Deallocates the subtitle struct used to create in managed memory. + + The frame. + + + + Releases unmanaged and - optionally - managed resources. + + true to release both managed and unmanaged resources; false to release only unmanaged resources. + + + + A pre-allocated, scaled video block. The buffer is in BGR, 24-bit format + + + + + Finalizes an instance of the class. + + + + + Gets the media type of the data + + + + + Gets a pointer to the first byte of the data buffer. + The format is 24bit BGR + + + + + Gets the length of the buffer in bytes. + + + + + The picture buffer stride. + Pixel Width * 24-bit color (3 byes) + alignment (typically 0 for modern hw). + + + + + Gets the number of horizontal pixels in the image. 
+ + + + + Gets the number of vertical pixels in the image. + + + + + Gets or sets the width of the aspect ratio. + + + + + Gets or sets the height of the aspect ratio. + + + + + Gets the SMTPE time code. + + + + + Gets the display picture number (frame number). + If not set by the decoder, this attempts to obtain it by dividing the start time by the + frame duration + + + + + Gets the coded picture number set by the decoder. + + + + + The picture buffer length of the last allocated buffer + + + + + Holds a reference to the last allocated buffer + + + + + Performs application-defined tasks associated with freeing, releasing, or resetting unmanaged resources. + + + + + Releases unmanaged and - optionally - managed resources. + + true to release both managed and unmanaged resources; false to release only unmanaged resources. + + + + Represents a wrapper for an unmanaged ffmpeg video frame. + + + + + + Initializes a new instance of the class. + + The frame. + The component. + + + + Finalizes an instance of the class. + + + + + Gets the type of the media. + + + + + Gets the closed caption data collected from the frame in CEA-708/EAS-608 format. + + + + + Gets the display picture number (frame number). + If not set by the decoder, this attempts to obtain it by dividing the start time by the + frame duration + + + + + Gets the coded picture number set by the decoder. + + + + + Gets the SMTPE time code. + + + + + Gets the pointer to the unmanaged frame. + + + + + Releases unmanaged and - optionally - managed resources. + + + + + Releases unmanaged and - optionally - managed resources. + + true to release both managed and unmanaged resources; false to release only unmanaged resources. + + + + A container capable of opening an input url, + reading packets from it, decoding frames, seeking, and pausing and resuming network streams + Code heavily based on https://raw.githubusercontent.com/FFmpeg/FFmpeg/release/3.2/ffplay.c + The method pipeline should be: + 1. 
Set Options (or don't, for automatic options) and Initialize, + 2. Perform continuous Reads, + 3. Perform continuous Decodes and Converts/Materialize + + + + + + The logger + + + + + Holds a reference to an input context. + + + + + The read synchronize root + + + + + The decode synchronize root + + + + + The convert synchronize root + + + + + Holds the set of components. + + + + + To detect redundat Dispose calls + + + + + Determines if the stream seeks by bytes always + + + + + Hold the value for the internal property with the same name. + Picture attachments are required when video streams support them + and these attached packets must be read before reading the first frame + of the stream and after seeking. + + + + + The stream read interrupt callback. + Used to detect read rimeouts. + + + + + The stream read interrupt start time. + When a read operation is started, this is set to the ticks of UTC now. + + + + + Initializes a new instance of the class. + + The media URL. + The logger. + + The protocol prefix. See https://ffmpeg.org/ffmpeg-protocols.html + Leave null if setting it is not intended. + mediaUrl + + + + Finalizes an instance of the class. + + + + + Gets the media URL. This is the input url, file or device that is read + by this container. + + + + + Gets the protocol prefix. + Typically async for local files and empty for other types. + + + + + The media initialization options. + Options are applied when calling the Initialize method. + After initialization, changing the options has no effect. + + + + + Provides stream, chapter and program info held by this container. + + + + + Gets the name of the media format. + + + + + Gets the media bitrate (bits per second). Returns 0 if not available. + + + + + Holds the metadata of the media file when the stream is initialized. + + + + + Gets a value indicating whether an Input Context has been initialize. + + + + + Gets a value indicating whether this instance is open. + + + + + Gets the duration of the media. 
+ If this information is not available (i.e. realtime media) it will + be set to TimeSpan.MinValue + + + + + Will be set to true whenever an End Of File situation is reached. + + + + + Gets the byte position at which the stream is being read. + Please note that this property gets updated after every Read. + + + + + Gets a value indicating whether the underlying media is seekable. + + + + + Gets a value indicating whether this container represents realtime media. + If the format name is rtp, rtsp, or sdp or if the url starts with udp: or rtp: + then this property will be set to true. + + + + + Provides direct access to the individual Media components of the input stream. + + + + + Gets the media start time by which all component streams are offset. + Typically 0 but it could be something other than 0. + + + + + Gets the seek start timestamp. + + + + + Gets the time the last packet was read from the input + + + + + For RTSP and other realtime streams reads can be suspended. + + + + + For RTSP and other realtime streams reads can be suspended. + This property will return true if reads have been suspended. + + + + + Gets a value indicating whether a packet read delay witll be enforced. + RSTP formats or MMSH Urls will have this property set to true. + Reading packets will block for at most 10 milliseconds depending on the last read time. + This is a hack according to the source code in ffplay.c + + + + + Picture attachments are required when video streams support them + and these attached packets must be read before reading the first frame + of the stream and after seeking. This property is not part of the public API + and is meant more for internal purposes + + + + + Opens the individual stram components on the existing input context in order to start reading packets. + Any Media Options must be set before this method is called. + + + + + Seeks to the specified position in the stream. 
This method attempts to do so as + precisely as possible, returning decoded frames of all available media type components + just before or right on the requested position. The position must be given in 0-based time, + so it converts component stream start time offset to absolute, 0-based time. + Pass TimeSpan.Zero to seek to the beginning of the stream. + + The position. + The list of media frames + + + + Reads the next available packet, sending the packet to the corresponding + internal media component. It also sets IsAtEndOfStream property. + Returns the media type if the packet was accepted by any of the media components. + Returns None if the packet was not accepted by any of the media components + or if reading failed (i.e. End of stream already or read error). + Packets are queued internally. To dequeue them you need to call the receive frames + method of each component until the packet buffer count becomes 0. + + The media type of the packet that was read + No input context initialized + When a read error occurs + + + + Decodes the next available packet in the packet queue for each of the components. + Returns the list of decoded frames. You can call this method until the Components.PacketBufferCount + becomes 0; The list of 0 or more decoded frames is returned in ascending StartTime order. + A Packet may contain 0 or more frames. Once the frame source objects are returned, you + are responsible for calling the Dispose method on them to free the underlying FFmpeg frame. + Note that even after releasing them you can still use the managed properties. + If you intend on Converting the frames to usable media frames (with Convert) you must not + release the frame. Specify the release input argument as true and the frame will be automatically + freed from memory. + + The list of media frames + + + + Performs audio, video and subtitle conversions on the decoded input frame so data + can be used as a Frame. Please note that if the output is passed as a reference. 
+ This works as follows: if the output reference is null it will be automatically instantiated + and returned by this function. This enables to either instantiate or reuse a previously allocated Frame. + This is important because buffer allocations are exepnsive operations and this allows you + to perform the allocation once and continue reusing thae same buffer. + + The raw frame source. Has to be compatiable with the target. (e.g. use VideoFrameSource to conver to VideoFrame) + The target frame. Has to be compatible with the source. + The siblings that may help guess additional output parameters. + if set to true releases the raw frame source from unmanaged memory. + + The media block + + No input context initialized + MediaType + input + input + or + input + + + + Closes the input context immediately releasing all resources. + This method is equivalent to calling the dispose method. + + + + + Releases unmanaged and - optionally - managed resources. + + + + + Initializes the input context to start read operations. + This does NOT create the stream components and therefore, there needs to be a call + to the Open method. + + The input context has already been initialized. + When an error initializing the stream occurs. + + + + Opens the individual stream components to start reading packets. + + + + + Creates the stream components by first finding the best available streams. + Then it initializes the components of the correct type each. + + The exception ifnromation + + + + The interrupt callback to handle stream reading timeouts + + A pointer to the format input context + 0 for OK, 1 for error (timeout) + + + + Reads the next packet in the underlying stream and enqueues in the corresponding media component. + Returns None of no packet was read. + + The type of media packet that was read + Initialize + Raised when an error reading from the stream occurs. 
+ + + + Suspends / pauses network streams + This should only be called upon Dispose + + + + + Resumes the reads of network streams + + + + + Drops the seek frames that are no longer needed. + Target time should be provided in absolute, 0-based time + + The frames. + The target time. + The number of dropped frames + + + + Seeks to the position at the start of the stream. + + + + + Seeks to the exact or prior frame of the main stream. + Supports byte seeking. + + The target time. + The list of media frames + + + + Reads and decodes packets untill the required media components have frames on or right before the target time. + + The list of frames that is currently being processed. Frames will be added here. + The target time in absolute 0-based time. + The requirement. + The number of decoded frames + + + + Releases unmanaged and - optionally - managed resources. + + + true to release both managed and unmanaged resources; false to release only unmanaged resources. + + + + A data structure containing a quque of packets to process. + This class is thread safe and disposable. + Enqueued, unmanaged packets are disposed automatically by this queue. + Dequeued packets are the responsibility of the calling code. + + + + + Gets the packet count. + + + + + Gets the sum of all the packet sizes contained + by this queue. + + + + + Gets the total duration in stream TimeBase units. + + + + + Gets or sets the at the specified index. + + + The . + + The index. + The packet reference + + + + Peeks the next available packet in the queue without removing it. + If no packets are available, null is returned. + + The packet + + + + Pushes the specified packet into the queue. + In other words, enqueues the packet. + + The packet. + + + + Dequeues a packet from this queue. + + The dequeued packet + + + + Clears and frees all the unmanaged packets from this queue. + + + + + Performs application-defined tasks associated with freeing, releasing, or resetting unmanaged resources. 
+ + + + + Releases unmanaged and - optionally - managed resources. + + true to release both managed and unmanaged resources; false to release only unmanaged resources. + + + + Performs subtitle stream extraction, decoding and text conversion. + + + + + + Initializes a new instance of the class. + + The container. + Index of the stream. + + + + Converts decoded, raw frame data in the frame source into a a usable frame.
+ The process includes performing picture, sample, or text conversions
+ so that the decoded source frame data is easily usable in multimedia applications
+
+ The source frame to use as an input.
+ The target frame that will be updated with the source frame. If null is passed the frame will be instantiated.
+ The sibling blocks that may help guess some additional parameters for the input frame.
+
+ Returns the updated output frame
+
+ input cannot be null
+
+ + + Creates a frame source object given the raw FFmpeg subtitle reference. + + The raw FFmpeg subtitle pointer. + The managed frame + + + + Performs video picture decoding, scaling and extraction logic. + + + + + + The output pixel format of the scaler: 24-bit BGR + + + + + Holds a reference to the video scaler + + + + + Initializes a new instance of the class. + + The container. + Index of the stream. + + + + Gets the video scaler flags used to perfom colorspace conversion (if needed). + + + + + Gets the base frame rate as reported by the stream component. + All discrete timestamps can be represented in this framerate. + + + + + Gets the current frame rate as guessed by the last processed frame. + Variable framerate might report different values at different times. + + + + + Gets the width of the picture frame. + + + + + Gets the height of the picture frame. + + + + + Converts decoded, raw frame data in the frame source into a a usable frame.
+ The process includes performing picture, sample, or text conversions
+ so that the decoded source frame data is easily usable in multimedia applications
+
+ The source frame to use as an input.
+ The target frame that will be updated with the source frame. If null is passed the frame will be instantiated.
+ The siblings to help guess additional frame parameters.
+
+ Returns the updated output frame
+
+ input
+
+ + + Creates a frame source object given the raw FFmpeg frame reference. + + The raw FFmpeg frame pointer. + Create a managed fraome from an unmanaged one. + + + + Releases unmanaged and - optionally - managed resources. + + true to release both managed and unmanaged resources; false to release only unmanaged resources. + + + + Gets the pixel format replacing deprecated pixel formats. + AV_PIX_FMT_YUVJ + + The frame. + A normalized pixel format + + + + Computes the frame filter arguments that are appropriate for the video filtering chain. + + The frame. + The base filter arguments + + + + If necessary, disposes the existing filtergraph and creates a new one based on the frame arguments. + + The frame. + + avfilter_graph_create_filter + or + avfilter_graph_create_filter + or + avfilter_link + or + avfilter_graph_parse + or + avfilter_graph_config + + + + + Destroys the filtergraph releasing unmanaged resources. + + + + + Represents a control that contains audio and/or video. + In contrast with System.Windows.Controls.MediaElement, this version uses + the FFmpeg library to perform reading and decoding of media streams. + + + + + + + + + Occurs right before the video is presented on the screen. + You can update the pizels on the bitmap before it is rendered on the screen. + Or you could take a screenshot. + Ensure you handle this very quickly as it runs on the UI thread. + + + + + Occurs right before the audio is added to the audio buffer. + You can update the bytes before they are enqueued. + Ensure you handle this quickly before you get choppy audio. + + + + + Occurs right before the subtitles are rendered. + You can update the text. + Ensure you handle this quickly before you get choppy subtitles. + + + + + Raises the rendering video event. + + The bitmap. + The stream. + The smtpe timecode. + The picture number. + The start time. + The duration. + The clock. + + + + Raises the rendering audio event. + + The audio block. + The clock. 
+ + + + Raises the rendering subtitles event. + + The block. + The clock. + True if the rendering should be prevented + + + + This partial class implements: + 1. Packet reading from the Container + 2. Frame Decoding from packet buffer and Block buffering + 3. Block Rendering from block buffer + + + + + Gets or sets a value indicating whether the workedrs have been requested + an exit. + + + + + Gets or sets a value indicating whether the decoder has moved its byte position + to something other than the normal continuous reads in the last read cycle. + + + + + Holds the blocks + + + + + Holds the block renderers + + + + + Holds the last rendered StartTime for each of the media block types + + + + + Gets a value indicating whether more packets can be read from the stream. + This does not check if the packet queue is full. + + + + + Gets a value indicating whether more frames can be decoded from the packet queue. + That is, if we have packets in the packet buffer or if we are not at the end of the stream. + + + + + Runs the read task which keeps a packet buffer as full as possible. + It reports on DownloadProgress by enqueueing an update to the property + in order to avoid any kind of disruption to this thread caused by the UI thread. + + + + + Continually decodes the available packet buffer to have as + many frames as possible in each frame queue and + up to the MaxFrames on each component + + + + + Continuously converts frmes and places them on the corresponding + block buffer. This task is responsible for keeping track of the clock + and calling the render methods appropriate for the current clock position. + + + + + Sets the clock to a discrete video position if possible + + The position. + + + + Gets a value indicating whether more frames can be converted into blocks of the given type. + + The t. + + true if this instance [can read more frames of] the specified t; otherwise, false. + + + + + Sends the given block to its corresponding media renderer. 
+ + The block. + The clock position. + The number of blocks sent to the renderer + + + + Adds the blocks of the given media type. + + The t. + The number of blocks that were added + + + + The command queue to be executed in the order they were sent. + + + + + Represents a real-time time measuring device. + Rendering media should occur as requested by the clock. + + + + + The underlying media container that provides access to + individual media component streams + + + + + Begins or resumes playback of the currently loaded media. + + + + + Pauses playback of the currently loaded media. + + + + + Pauses and rewinds the currently loaded media. + + + + + Closes the currently loaded media. + + + + + The logger + + + + + This is the image that will display the video from a Writeable Bitmap + + + + + To detect redundant calls + + + + + The ffmpeg directory + + + + + IUriContext BaseUri backing + + + + + The position update timer + + + + + When position is being set from within this control, this field will + be set to true. This is useful to detect if the user is setting the position + or if the Position property is being driven from within + + + + + Initializes static members of the class. + + + + + Initializes a new instance of the class. + + + + + Occurs when a logging message from the FFmpeg library has been received. + This is shared across all instances of Media Elements + + + + + Multicast event for property change notifications. + + + + + Occurs when a logging message has been logged. + This does not include FFmpeg messages. + + + + + Gets or sets the FFmpeg path from which to load the FFmpeg binaries. + You must set this path before setting the Source property for the first time on any instance of this control. + Settng this property when FFmpeg binaries have been registered will throw an exception. 
+ + + + + Gets or sets the horizontal alignment characteristics applied to this element when it is + composed within a parent element, such as a panel or items control. + + + + + Gets or sets the base URI of the current application context. + + + + + When position is being set from within this control, this field will + be set to true. This is useful to detect if the user is setting the position + or if the Position property is being driven from within + + + + + Gets the grid control holding the rest of the controls. + + + + + Performs application-defined tasks associated with freeing, releasing, or resetting unmanaged resources. + + + + + Raises the FFmpegMessageLogged event + + The instance containing the event data. + + + + Updates the position property signaling the update is + coming internally. This is to distinguish between user/binding + written value to the Position Porperty and value set by this control's + internal clock. + + The current position. + + + + Raises the MessageLogged event + + The instance containing the event data. + + + + Checks if a property already matches a desired value. Sets the property and + notifies listeners only when necessary. + + Type of the property. + Reference to a property with both getter and setter. + Desired value for the property. + Name of the property used to notify listeners. This + value is optional and can be provided automatically when invoked from compilers that + support CallerMemberName. + True if the value was changed, false if the existing value matched the + desired value. + + + + Notifies listeners that a property value has changed. + + Name of the property used to notify listeners. This + value is optional and can be provided automatically when invoked from compilers + that support . + + + + Releases unmanaged and - optionally - managed resources. + + true to release both managed and unmanaged resources; false to release only unmanaged resources. 
+ + + + DependencyProperty for FFmpegMediaElement Source property. + + + + + DependencyProperty for Stretch property. + + + + + DependencyProperty for StretchDirection property. + + + + + The DependencyProperty for the MediaElement.Balance property. + + + + + The DependencyProperty for the MediaElement.IsMuted property. + + + + + The DependencyProperty for the MediaElement.SpeedRatio property. + + + + + The DependencyProperty for the MediaElement.Volume property. + + + + + The DependencyProperty for the MediaElement.ScrubbingEnabled property. + + + + + The DependencyProperty for the MediaElement.UnloadedBehavior property. + TODO: Currently this property has no effect. Needs implementation. + + + + + The DependencyProperty for the MediaElement.LoadedBehavior property. + + + + + The DependencyProperty for the MediaElement.Position property. + + + + + Gets/Sets the Source on this MediaElement. + The Source property is the Uri of the media to be played. + + + + + Gets/Sets the Stretch on this MediaElement. + The Stretch property determines how large the MediaElement will be drawn. + + + + + Gets/Sets the stretch direction of the Viewbox, which determines the restrictions on + scaling that are applied to the content inside the Viewbox.  For instance, this property + can be used to prevent the content from being smaller than its native size or larger than + its native size. + + + + + Specifies the behavior that the media element should have when it + is loaded. The default behavior is that it is under manual control + (i.e. the caller should call methods such as Play in order to play + the media). If a source is set, then the default behavior changes to + to be playing the media. If a source is set and a loaded behavior is + also set, then the loaded behavior takes control. + + + + + Gets/Sets the SpeedRatio property on the MediaElement. + + + + + Specifies how the underlying media should behave when + it has ended. The default behavior is to Close the media. 
+ + + + + Gets/Sets the Volume property on the MediaElement. + Note: Valid values are from 0 to 1 + + + + + Gets/Sets the Balance property on the MediaElement. + + + + + Gets/Sets the IsMuted property on the MediaElement. + + + + + Gets or sets a value that indicates whether the MediaElement will update frames + for seek operations while paused. This is a dependency property. + + + + + Gets/Sets the Position property on the MediaElement. + + + + + Provides key-value pairs of the metadata contained in the media. + Returns null when media has not been loaded. + + + + + Gets the media format. Returns null when media has not been loaded. + + + + + Gets the duration of a single frame step. + If there is a video component with a framerate, this propery returns the length of a frame. + If there is no video component it simply returns a tenth of a second. + + + + + Returns whether the given media has audio. + Only valid after the MediaOpened event has fired. + + + + + Returns whether the given media has video. Only valid after the + MediaOpened event has fired. + + + + + Gets the video codec. + Only valid after the MediaOpened event has fired. + + + + + Gets the video bitrate. + Only valid after the MediaOpened event has fired. + + + + + Returns the natural width of the media in the video. + Only valid after the MediaOpened event has fired. + + + + + Returns the natural height of the media in the video. + Only valid after the MediaOpened event has fired. + + + + + Gets the video frame rate. + Only valid after the MediaOpened event has fired. + + + + + Gets the duration in seconds of the video frame. + Only valid after the MediaOpened event has fired. + + + + + Gets the audio codec. + Only valid after the MediaOpened event has fired. + + + + + Gets the audio bitrate. + Only valid after the MediaOpened event has fired. + + + + + Gets the audio channels count. + Only valid after the MediaOpened event has fired. + + + + + Gets the audio sample rate. 
+ Only valid after the MediaOpened event has fired. + + + + + Gets the audio bits per sample. + Only valid after the MediaOpened event has fired. + + + + + Gets the Media's natural duration + Only valid after the MediaOpened event has fired. + + + + + Returns whether the currently loaded media can be paused. + This is only valid after the MediaOpened event has fired. + Note that this property is computed based on wether the stream is detected to be a live stream. + + + + + Returns whether the currently loaded media is live or realtime + This is only valid after the MediaOpened event has fired. + + + + + Gets a value indicating whether the currently loaded media can be seeked. + + + + + Gets a value indicating whether the media is playing. + + + + + Gets a value indicating whether the media has reached its end. + + + + + Get a value indicating whether the media is buffering. + + + + + Gets a value indicating whether the media seeking is in progress. + + + + + Returns the current video SMTPE timecode if available. + If not available, this property returns an empty string. + + + + + Gets a value that indicates the percentage of buffering progress made. + Range is from 0 to 1 + + + + + The wait packet buffer length. + It is adjusted to 1 second if bitrate information is available. + Otherwise, it's simply 512KB + + + + + Gets a value that indicates the percentage of download progress made. + Range is from 0 to 1 + + + + + Gets the maximum packet buffer length, according to the bitrate (if available). + If it's a realtime stream it will return 30 times the buffer cache length. + Otherwise, it will return 4 times of the buffer cache length. + + + + + Gets a value indicating whether the media is in the process of opening. + + + + + Gets a value indicating whether this media element + currently has an open media url. + + + + + Gets the current playback state. + + + + + Updates the metada property. 
+ + + + + Updates the media properties notifying that there are new values to be read from all of them. + Call this method only when necessary because it creates a lot of events. + + + + + Resets the dependency properies. + + + + + BufferingStarted is a routed event + + + + + BufferingEnded is a routed event + + + + + SeekingStarted is a routed event + + + + + SeekingEnded is a routed event + + + + + MediaFailedEvent is a routed event. + + + + + MediaOpened is a routed event. + + + + + MediaOpeningEvent is a routed event. + + + + + MediaEnded is a routed event + + + + + Occurs when buffering of packets was started + + + + + Occurs when buffering of packets was Ended + + + + + Occurs when Seeking of packets was started + + + + + Occurs when Seeking of packets was Ended + + + + + Raised when the media fails to load or a fatal error has occurred which prevents playback. + + + + + Raised when the media is opened + + + + + Raised before the input stream of the media is opened. + Use this method to modify the input options. + + + + + Raised when the corresponding media ends. + + + + + Raises the media failed event. + + The ex. + + + + Raises the media opened event. + + + + + Raises the media opening event. + + + + + Creates a new instance of exception routed event arguments. + This method exists because the constructor has not been made public for that class. + + The routed event. + The sender. + The error exception. + The event arguments + + + + Logs the start of an event + + The event. + + + + Logs the end of an event. + + The event. + + + + Raises the buffering started event. + + + + + Raises the buffering ended event. + + + + + Raises the Seeking started event. + + + + + Raises the Seeking ended event. + + + + + Raises the media ended event. + + + + + A base class to represent media block + rendering event arguments. + + + + + + Initializes a new instance of the class. + + The stream. + The position. + The duration. + The clock. 
+ + + + Provides Stream Information coming from the media container. + + + + + Gets the clock position at which the media + was called for rendering + + + + + Gets the starting time at which this media + has to be presented. + + + + + Gets how long this media has to be presented. + + + + + Provides the audio samples rendering payload as event arguments. + + + + + + Initializes a new instance of the class. + + The buffer. + The length. + The stream. + The start time. + The duration. + The clock. + + + + Gets a pointer to the samples buffer. + Samples are provided in PCM 16-bit signed, interleaved stereo. + + + + + Gets the length in bytes of the samples buffer. + + + + + Gets the number of samples in 1 second. + + + + + Gets the number of channels. + + + + + Gets the number of bits per sample. + + + + + Gets the number of samples in the buffer for all channels. + + + + + Gets the number of samples in the buffer per channel. + + + + + Provides the subtitles rendering payload as event arguments. + + + + + + Initializes a new instance of the class. + + The text. + The original text. + The format. + The stream. + The start time. + The duration. + The clock. + + + + Gets the text stripped out of ASS or SRT formatting. + This is what the default subtitle renderer will display + on the screen. + + + + + Gets the text as originally decoded including + all markup and formatting. + + + + + Gets the type of subtitle format the original + subtitle text is in. + + + + + When set to true, clears the current subtitle and + prevents the subtitle block from being rendered. + + + + + The video rendering event arguments + + + + + + Initializes a new instance of the class. + + The bitmap. + The stream. + The smtpe timecode. + The picture number. + The start time. + The duration. + The clock. + + + + Gets the writable bitmap filled with the video frame pixels. + Feel free to capture or change this image. + + + + + Gets the display picture number (frame number). 
+ If not set by the decoder, this attempts to obtain it by dividing the start time by the + frame duration + + + + + Gets the SMTPE time code. + + + + + Holds media information about the input, its chapters, programs and individual stream components + + + + + Initializes a new instance of the class. + + The container. + + + + Gets the input URL string used to access and create the media container + + + + + Gets the name of the container format. + + + + + Gets the metadata for the input. This may include stuff like title, creation date, company name, etc. + Individual stream components may contain additional metadata. + The metadata + + + + + Gets the duration of the input as reported by the container format. + Individual stream components may have different values + + + + + Gets the start timestamp of the input as reported by the container format. + Individual stream components may have different values + + + + + If available, returns a non-zero value as reported by the container format. + + + + + Gets a list of chapters + + + + + Gets a list of programs with their associated streams. + + + + + Gets the dictionary of stream information components by stream index. + + + + + Provides access to the best streams of each media type found in the container. + This uses some internal FFmpeg heuristics. + + + + + Extracts the stream infos from the input. + + The ic. + The list of stream infos + + + + Finds the best streams for audio video, and subtitles. + + The ic. + The streams. + The star infos + + + + Extracts the chapters from the input. + + The ic. + The chapters + + + + Extracts the programs from the input and creates associations between programs and streams. + + The ic. + The streams. + The program information + + + + Represents media stream information + + + + + Gets the stream identifier. This is different from the stream index. + Typically this value is not very useful. + + + + + Gets the index of the stream. + + + + + Gets the type of the codec. 
+ + + + + Gets the name of the codec type. Audio, Video, Subtitle, Data, etc. + + + + + Gets the codec identifier. + + + + + Gets the name of the codec. + + + + + Gets the codec profile. Only valid for H.264 or + video codecs that use profiles. Otherwise empty. + + + + + Gets the codec tag. Not very useful except for fixing bugs with + some demuxer scenarios. + + + + + Gets a value indicating whether this stream has closed captions. + Typically this is set for video streams. + + + + + Gets a value indicating whether this stream contains lossless compressed data. + + + + + Gets the pixel format. Only valid for Vide streams. + + + + + Gets the width of the video frames. + + + + + Gets the height of the video frames. + + + + + Gets the field order. This is useful to determine + if the video needs deinterlacing + + + + + Gets the video color range. + + + + + Gets the audio sample rate. + + + + + Gets the audio sample format. + + + + + Gets the stream time base unit in seconds. + + + + + Gets the sample aspect ratio. + + + + + Gets the display aspect ratio. + + + + + Gets the reported bit rate. 9 for unavalable. + + + + + Gets the maximum bit rate for variable bitrate streams. 0 if unavailable. + + + + + Gets the number of frames that were read to obtain the stream's information. + + + + + Gets the number of reference frames. + + + + + Gets the average FPS reported by the stream. + + + + + Gets the real (base) framerate of the stream + + + + + Gets the fundamental unit of time in 1/seconds used to represent timestamps in the stream, according to the stream data + + + + + Gets the fundamental unit of time in 1/seconds used to represent timestamps in the stream ,accoring to the codec + + + + + Gets the disposition flags. + Please see ffmpeg.AV_DISPOSITION_* fields. + + + + + Gets the start time. + + + + + Gets the duration. + + + + + Gets the stream's metadata. + + + + + Gets the language string from the stream's metadata. 
+ + + + + Represents a chapter within a container + + + + + Gets the chapter index. + + + + + Gets the chapter identifier. + + + + + Gets the start time of the chapter. + + + + + Gets the end time of the chapter. + + + + + Gets the chapter metadata. + + + + + Represents a program and its associated streams within a container. + + + + + Gets the program number. + + + + + Gets the program identifier. + + + + + Gets the program metadata. + + + + + Gets the associated program streams. + + + + + Gets the name of the program. Empty if unavailable. + + + + + Represents the contents of alogging message that was sent to the log manager. + + + + + + Initializes a new instance of the class. + + The media element. + Type of the message. + The message. + + + + Gets the intance of the MediaElement that generated this message. + When null, it means FFmpeg generated this message. + + + + + Gets the timestamp. + + + + + Gets the type of the message. + + + + + Gets the contents of the message. + + + + + Generic interface for all WaveProviders. + + + + + Gets the WaveFormat of this WaveProvider. + + + + + Fill the specified buffer with wave data. + + The buffer to fill of wave data. + Offset into buffer + The number of bytes to read + + the number of bytes written to the buffer. + + + + + Windows multimedia error codes from mmsystem.h. 
+ + + + no error, MMSYSERR_NOERROR + + + unspecified error, MMSYSERR_ERROR + + + device ID out of range, MMSYSERR_BADDEVICEID + + + driver failed enable, MMSYSERR_NOTENABLED + + + device already allocated, MMSYSERR_ALLOCATED + + + device handle is invalid, MMSYSERR_INVALHANDLE + + + no device driver present, MMSYSERR_NODRIVER + + + memory allocation error, MMSYSERR_NOMEM + + + function isn't supported, MMSYSERR_NOTSUPPORTED + + + error value out of range, MMSYSERR_BADERRNUM + + + invalid flag passed, MMSYSERR_INVALFLAG + + + invalid parameter passed, MMSYSERR_INVALPARAM + + + handle being used simultaneously on another thread (eg callback),MMSYSERR_HANDLEBUSY + + + specified alias not found, MMSYSERR_INVALIDALIAS + + + bad registry database, MMSYSERR_BADDB + + + registry key not found, MMSYSERR_KEYNOTFOUND + + + registry read error, MMSYSERR_READERROR + + + registry write error, MMSYSERR_WRITEERROR + + + registry delete error, MMSYSERR_DELETEERROR + + + registry value not found, MMSYSERR_VALNOTFOUND + + + driver does not call DriverCallback, MMSYSERR_NODRIVERCB + + + more data to be returned, MMSYSERR_MOREDATA + + + unsupported wave format, WAVERR_BADFORMAT + + + still something playing, WAVERR_STILLPLAYING + + + header not prepared, WAVERR_UNPREPARED + + + device is synchronous, WAVERR_SYNC + + + Conversion not possible (ACMERR_NOTPOSSIBLE) + + + Busy (ACMERR_BUSY) + + + Header Unprepared (ACMERR_UNPREPARED) + + + Cancelled (ACMERR_CANCELED) + + + invalid line (MIXERR_INVALLINE) + + + invalid control (MIXERR_INVALCONTROL) + + + invalid value (MIXERR_INVALVALUE) + + + + http://msdn.microsoft.com/en-us/library/dd757347(v=VS.85).aspx + + + + + Enumerates the various wave output playback states + + + + + Stopped + + + + + Playing + + + + + Paused + + + + + Supported wave formats for WaveOutCapabilities + + + + + 11.025 kHz, Mono, 8-bit + + + + + 11.025 kHz, Stereo, 8-bit + + + + + 11.025 kHz, Mono, 16-bit + + + + + 11.025 kHz, Stereo, 16-bit + + + + + 22.05 kHz, Mono, 
8-bit + + + + + 22.05 kHz, Stereo, 8-bit + + + + + 22.05 kHz, Mono, 16-bit + + + + + 22.05 kHz, Stereo, 16-bit + + + + + 44.1 kHz, Mono, 8-bit + + + + + 44.1 kHz, Stereo, 8-bit + + + + + 44.1 kHz, Mono, 16-bit + + + + + 44.1 kHz, Stereo, 16-bit + + + + + 44.1 kHz, Mono, 8-bit + + + + + 44.1 kHz, Stereo, 8-bit + + + + + 44.1 kHz, Mono, 16-bit + + + + + 44.1 kHz, Stereo, 16-bit + + + + + 48 kHz, Mono, 8-bit + + + + + 48 kHz, Stereo, 8-bit + + + + + 48 kHz, Mono, 16-bit + + + + + 48 kHz, Stereo, 16-bit + + + + + 96 kHz, Mono, 8-bit + + + + + 96 kHz, Stereo, 8-bit + + + + + 96 kHz, Mono, 16-bit + + + + + 96 kHz, Stereo, 16-bit + + + + + Represents a Wave file format + + + + The format tag -- always 0x0001 PCM + + + number of channels + + + sample rate + + + for buffer estimation + + + block size of data + + + number of bits per sample of mono data + + + number of following bytes + + + + Initializes a new instance of the class. + PCM 48Khz stereo 16 bit signed, interleaved, 2-channel format + + + + + Initializes a new instance of the class. + + Sample Rate + Number of channels + + + + Initializes a new instance of the class. + + The rate. + The bits. + The channels. + channels - channels + + + + Returns the number of channels (1=mono,2=stereo etc) + + + + + Returns the sample rate (samples per second) + + + + + Returns the average number of bytes used per second + + + + + Returns the block alignment + + + + + Returns the number of bits per sample (usually 16 or 32, sometimes 24 or 8) + Can be 0 for some codecs + + + + + Returns the number of extra bytes used by this waveformat. Often 0, + except for compressed formats which store extra data after the WAVEFORMATEX header + + + + + Gets the size of a wave buffer equivalent to the latency in milliseconds. + + The milliseconds. 
+ The size + + + + Reports this WaveFormat as a string + + String describing the wave format + + + + Compares with another WaveFormat object + + Object to compare to + True if the objects are the same + + + + Provides a Hashcode for this WaveFormat + + A hashcode + + + + WaveHeader interop structure (WAVEHDR) + http://msdn.microsoft.com/en-us/library/dd743837%28VS.85%29.aspx + + + + pointer to locked data buffer (lpData) + + + length of data buffer (dwBufferLength) + + + used for input only (dwBytesRecorded) + + + for client's use (dwUser) + + + assorted flags (dwFlags) + + + loop control counter (dwLoops) + + + PWaveHdr, reserved for driver (lpNext) + + + reserved for driver + + + + Wave Header Flags enumeration + + + + + WHDR_BEGINLOOP + This buffer is the first buffer in a loop. This flag is used only with output buffers. + + + + + WHDR_DONE + Set by the device driver to indicate that it is finished with the buffer and is returning it to the application. + + + + + WHDR_ENDLOOP + This buffer is the last buffer in a loop. This flag is used only with output buffers. + + + + + WHDR_INQUEUE + Set by Windows to indicate that the buffer is queued for playback. + + + + + WHDR_PREPARED + Set by Windows to indicate that the buffer has been prepared with the waveInPrepareHeader or waveOutPrepareHeader function. + + + + + MME Wave function interop + + + + + CALLBACK_NULL + No callback + + + + + CALLBACK_FUNCTION + dwCallback is a FARPROC + + + + + CALLBACK_EVENT + dwCallback is an EVENT handle + + + + + CALLBACK_WINDOW + dwCallback is a HWND + + + + + CALLBACK_THREAD + callback is a thread ID + + + + + WIM_OPEN + + + + + WIM_CLOSE + + + + + WIM_DATA + + + + + WOM_CLOSE + + + + + WOM_DONE + + + + + WOM_OPEN + + + + + A wrapper class for MmException. + + + + + Initializes a new instance of the class. 
+ + The result returned by the Windows API call + The name of the Windows API that failed + + + + Returns the Windows API result + + + + + Helper function to automatically raise an exception on failure + + The result of the API call + The API function name + + + + Creates an error message base don an erro result. + + The result. + The function. + A descriptive rror message + + + + A buffer of Wave samples for streaming to a Wave Output device + + + + + Initializes a new instance of the class. + + WaveOut device to write to + Buffer size in bytes + Stream to provide more data + Lock to protect WaveOut API's from being called on >1 thread + + + + Finalizes an instance of the class. + + + + + Whether the header's in queue flag is set + + + + + The buffer size in bytes + + + + + Releases resources held by this WaveBuffer + + + + + this is called by the Wave callback and should be used to refill the buffer. + This calls the .Read method on the stream + + true when bytes were written. False if no bytes were written. + + + + Releases resources held by this WaveBuffer + + true to release both managed and unmanaged resources; false to release only unmanaged resources. + + + + Writes to wave out. + + waveOutWrite + + + + WaveOutCapabilities structure (based on WAVEOUTCAPS2 from mmsystem.h) + http://msdn.microsoft.com/library/default.asp?url=/library/en-us/multimed/htm/_win32_waveoutcaps_str.asp + + + + + wMid + + + + + wPid + + + + + vDriverVersion + + + + + Product Name (szPname) + + + + + Supported formats (bit flags) dwFormats + + + + + Supported channels (1 for mono 2 for stereo) (wChannels) + Seems to be set to -1 on a lot of devices + + + + + wReserved1 + + + + + Optional functionality supported by the device + + + + + Number of channels supported + + + + + Whether playback rate control is supported + + + + + Whether volume control is supported + + + + + Gets a value indicating whether this device supports independent channel volume control. 
+ + + + + Gets a value indicating whether this device supports pitch control. + + + + + Gets a value indicating whether the device returns sample-accurate position information. + + + + + Gets a value indicating whether the driver is synchronous and will block while playing a buffer. + + + + + The product name + + + + + The device name Guid (if provided) + + + + + The product name Guid (if provided) + + + + + The manufacturer guid (if provided) + + + + + Checks to see if a given SupportedWaveFormat is supported + + The SupportedWaveFormat + true if supported + + + + Flags indicating what features this WaveOut device supports + + + + supports pitch control (WAVECAPS_PITCH) + + + supports playback rate control (WAVECAPS_PLAYBACKRATE) + + + supports volume control (WAVECAPS_VOLUME) + + + supports separate left-right volume control (WAVECAPS_LRVOLUME) + + + (WAVECAPS_SYNC) + + + (WAVECAPS_SAMPLEACCURATE) + + + + A wave player that opens an audio device and continuously feeds it + with audio samples using a wave provider. + + + + + Initializes a new instance of the class. + + The renderer. + + + + Finalizes an instance of the class. + + + + + Gets or sets the desired latency in milliseconds + Should be set before a call to Init + + + + + Gets or sets the number of buffers used + Should be set before a call to Init + + + + + Gets or sets the device number + Should be set before a call to Init + This must be between -1 and DeviceCount - 1. + -1 means stick to default device even default device is changed + + + + + Gets a instance indicating the format the hardware is using. + + + + + Playback State + + + + + Gets the capabilities. + + + + + Initializes the specified wave provider. + + The wave provider. + Can't re-initialize during playback + + + + Start playing the audio from the WaveStream + + + + + Pause the audio + + + + + Stop and reset the WaveOut device + + + + + Gets the current position in bytes from the wave output device. + (n.b. 
this is not the same thing as the position within your reader + stream - it calls directly into waveOutGetPosition) + + Position in bytes + + + + Closes this WaveOut device + + + + + Closes the WaveOut device and disposes of buffers + + True if called from Dispose + + + + Resume playing after a pause from the same position + + + + + Starts the playback thread. + + + + + Performs the continuous playback. + + + + + Closes the wave device. + + + + + Disposes the buffers. + + + + + Provides Audio Output capabilities by writing samples to the default audio output device. + + + + + + + + Initializes a new instance of the class. + + The media element. + + + + Gets the output format of the audio + + + + + Gets the parent media element. + + + + + Gets or sets the volume. + + + The volume. + + + + + Gets or sets the balance (-1.0 to 1.0). + + + + + Gets or sets a value indicating whether the wave output is muted. + + + + + Gets the realtime latency of the audio relative to the internal wall clock. + A negative value means audio is ahead of the wall clock. + A positive value means audio is behind of the wall clock. + + + + + Gets current audio the position. + + + + + Gets the desired latency odf the audio device. + Value is always positive and typically 200ms. This means audio gets rendered up to this late behind the wall clock. + + + + + Gets the speed ratio. + + + + + Renders the specified media block. + + The media block. + The clock position. + + + + Called on every block rendering clock cycle just in case some update operation needs to be performed. + This needs to return immediately so the calling thread is not disturbed. + + The clock position. 
+ + + + Executed when the Play method is called on the parent MediaElement + + + + + Executed when the Pause method is called on the parent MediaElement + + + + + Executed when the Pause method is called on the parent MediaElement + + + + + Executed when the Close method is called on the parent MediaElement + + + + + Executed after a Seek operation is performed on the parent MediaElement + + + + + Waits for the renderer to be ready to render. + + + + + Performs application-defined tasks associated with freeing, releasing, or resetting unmanaged resources. + + + + + Called whenever the audio driver requests samples. + Do not call this method directly. + + The render buffer. + The render buffer offset. + The requested bytes. + The number of bytes that were read. + + + + Called when [application exit]. + + The sender. + The instance containing the event data. + + + + Initializes the audio renderer. + Call the Play Method to start reading samples + + + + + Destroys the audio renderer. + Makes it useless. + + + + + Synchronizes audio rendering to the wall clock. + Returns true if additional samples need to be read. + Returns false if silence has been written and no further reading is required. + + The target buffer. + The target buffer offset. + The requested bytes. + True to continue processing. False to write silence. + + + + Reads from the Audio Buffer and stretches the samples to the required requested bytes. + This will make audio samples sound stretched (low pitch). + The result is put to the first requestedBytes count of the ReadBuffer. + requested + + The requested bytes. + + + + Reads from the Audio Buffer and shrinks (averages) the samples to the required requested bytes. + This will make audio samples sound shrunken (high pitch). + The result is put to the first requestedBytes count of the ReadBuffer. + + The requested number of bytes. + if set to true average samples per block. 
Otherwise, take the first sample per block only + + + + Applies volume and balance to the audio samples storead in RedBuffer and writes them + to the specified target buffer. + + The target buffer. + The target buffer offset. + The requested number of bytes. + + + + Releases unmanaged and - optionally - managed resources. + + + true to release both managed and unmanaged resources; false to release only unmanaged resources. + + + + Provides a unified API for media rendering classes + + + + + Gets the parent media element. + + + + + Waits for the renderer to be ready to render. + + + + + Executed when the Play method is called on the parent MediaElement + + + + + Executed when the Pause method is called on the parent MediaElement + + + + + Executed when the Pause method is called on the parent MediaElement + + + + + Executed when the Close method is called on the parent MediaElement + + + + + Executed after a Seek operation is performed on the parent MediaElement + + + + + Called when a media block is due rendering. + This needs to return immediately so the calling thread is not disturbed. + + The media block. + The clock position. + + + + Called on every block rendering clock cycle just in case some update operation needs to be performed. + This needs to return immediately so the calling thread is not disturbed. + + The clock position. + + + + Subtitle Renderer - Does nothing at this point. + + + + + + The synchronize lock + + + + + Holds the text to be rendered when the Update method is called. + + + + + Holds the text that was last rendered when Update was called. + + + + + Initializes a new instance of the class. + + The media element. + + + + Gets the parent media element. 
+ + + + + Executed when the Close method is called on the parent MediaElement + + + + + Executed when the Pause method is called on the parent MediaElement + + + + + Executed when the Play method is called on the parent MediaElement + + + + + Executed when the Pause method is called on the parent MediaElement + + + + + Executed after a Seek operation is performed on the parent MediaElement + + + + + Waits for the renderer to be ready to render. + + + + + Renders the specified media block. + + The media block. + The clock position. + + + + Called when a media block must stop being rendered. + This needs to return immediately so the calling thread is not disturbed. + + The clock position. + + + + Gets or creates the tex blocks that make up the subtitle text and outline. + + The text blocks including the fill and outline (5 total) + + + + Sets the text to be rendered on the text blocks. + Returns immediately because it enqueues the action on the UI thread. + + The text. + + + + Provides Video Image Rendering via a WPF Writable Bitmap + + + + + + The bitmap that is presented to the user. + + + + + Set when a bitmap is being written to the target bitmap + + + + + Initializes a new instance of the class. + + The media element. + + + + Gets the parent media element. + + + + + Executed when the Play method is called on the parent MediaElement + + + + + Executed when the Pause method is called on the parent MediaElement + + + + + Executed when the Pause method is called on the parent MediaElement + + + + + Executed when the Close method is called on the parent MediaElement + + + + + Executed after a Seek operation is performed on the parent MediaElement + + + + + Waits for the renderer to be ready to render. + + + + + Renders the specified media block. + This needs to return immediately so the calling thread is not disturbed. + + The media block. + The clock position. 
+ + + + Called on every block rendering clock cycle just in case some update operation needs to be performed. + This needs to return immediately so the calling thread is not disturbed. + + The clock position. + + + + Initializes the target bitmap. Pass a null block to initialize with the default video properties. + + The block. + + + + Applies the scale transform according to the block's aspect ratio. + + The b. + + + + Defines the different log message types received by the log handler + + + + + The none messge type + + + + + The information messge type + + + + + The debug messge type + + + + + The trace messge type + + + + + The error messge type + + + + + The warning messge type + + + + + A Media Container Exception + + + + + + Initializes a new instance of the class. + + The message that describes the error. + + + + Represents a set of options that are used to initialize a media container. + + + + + Initializes a new instance of the class. + + + + + Gets or sets the forced input format. If let null or empty, + the input format will be selected automatically. + + + + + Gets or sets a value indicating whether [enable low resource]. + In theroy this should be 0,1,2,3 for 1, 1/2, 1,4 and 1/8 resolutions. + TODO: We are for now just supporting 1/2 rest (true value) + Port of lowres. + + + + + Gets or sets a value indicating whether [enable fast decoding]. + Port of fast + + + + + A dictionary of Format options. + Supported format options are specified in https://www.ffmpeg.org/ffmpeg-formats.html#Format-Options + + + + + Gets the codec options. + Codec options are documented here: https://www.ffmpeg.org/ffmpeg-codecs.html#Codec-Options + Port of codec_opts + + + + + Gets or sets a value indicating whether experimental hardware acceleration is enabled. + Defaults to false. This feature is experimental. + + + + + Gets or sets a value indicating whether PTS are generated automatically and not read + from the packets themselves. Defaults to false. 
+ Port of genpts + + + + + Gets or sets the maximum duration to be analyzed before ifentifying stream information. + In realtime streams this can be reduced to reduce latency (i.e. TimeSpan.Zero) + + + + + Gets or sets the amount of bytes to probe before getting the stram info + In realtime streams probesize can be reduced to reduce latency. + Minimum value is 32. + + + + + Gets or sets the amount of time to wait for a an open or read operation to complete. + + + + + Prevent reading from audio stream components. + Port of audio_disable + + + + + Prevent reading from video stream components. + Port of video_disable + + + + + Prevent reading from subtitle stream components. + Port of subtitle_disable + Subtitles are not yet first-class citizens in FFmpeg and + this is why they are disabled by default. + + + + + Allows for a custom video filter string. + Please see: https://ffmpeg.org/ffmpeg-filters.html#Video-Filters + + + + + Initially contains the best suitable video stream. + Can be changed to a different stream reference. + + + + + Allows for a custom audio filter string. + Please see: https://ffmpeg.org/ffmpeg-filters.html#Audio-Filters + + + + + Initially contains the best suitable audio stream. + Can be changed to a different stream reference. + + + + + Initially contains the best suitable subititle stream. + Can be changed to a different stream reference. + + + + + Represents a set of codec options associated with a stream specifier. + + + + + Holds the internal list of option items + + + + + Initializes a new instance of the class. + + + + + Adds an option + + The key. + The value. + Type of the stream. + + + + Adds an option + + The key. + The value. + Index of the stream. + + + + Adds an option + + The key. + The value. + Type of the stream. + Index of the stream. + + + + Retrieves a dictionary with the options for the specified codec. + Port of filter_codec_opts + + The codec identifier. + The format. + The stream. + The codec. 
+ The filtered options + + + + Retrieves an array of dictionaries, one for each stream index + https://ffmpeg.org/ffplay.html#toc-Options + Port of setup_find_stream_info_opts. + + The format. + The options per stream + + + + Converts a character to a media type. + + The c. + The media type + + + + Represents the event arguments of the MediaOpening routed event. + + + + + + Initializes a new instance of the class. + + The routed event. + The source. + The options. + The input information. + + + + Set or change the options before the media is opened. + + + + + Provides internal details of the media, inclusing its component streams. + Typically, options are set based on what this information contains. + + + + + A strongly-typed resource class, for looking up localized strings, etc. + + + + + Returns the cached ResourceManager instance used by this class. + + + + + Overrides the current thread's CurrentUICulture property for all + resource lookups using this strongly typed resource class. + + + + + Looks up a localized resource of type System.Drawing.Bitmap. + + +
+
diff --git a/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/ViewerPanel.xaml b/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/ViewerPanel.xaml index d02d378..070601d 100644 --- a/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/ViewerPanel.xaml +++ b/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/ViewerPanel.xaml @@ -3,7 +3,7 @@ xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml" xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006" xmlns:d="http://schemas.microsoft.com/expression/blend/2008" - xmlns:vlc="clr-namespace:Meta.Vlc.Wpf;assembly=Meta.Vlc.Wpf" + xmlns:ffme="clr-namespace:Unosquare.FFME;assembly=ffme" xmlns:local="clr-namespace:QuickLook.Plugin.VideoViewer" xmlns:glassLayer="clr-namespace:QuickLook.Controls.GlassLayer;assembly=QuickLook" mc:Ignorable="d" @@ -13,6 +13,7 @@ + @@ -20,14 +21,13 @@ - + @@ -178,14 +178,16 @@ + SmallChange="{Binding FrameStepDuration, ElementName=mediaElement, Converter={StaticResource TimeSpanToSecondsConverter}}" + LargeChange="{Binding FrameStepDuration, ElementName=mediaElement, Converter={StaticResource TimeSpanToSecondsConverter}}" + Maximum="{Binding NaturalDuration, ElementName=mediaElement, Converter={StaticResource TimeSpanToSecondsConverter}}" + Value="{Binding Position, ElementName=mediaElement, Converter={StaticResource TimeSpanToSecondsConverter}}" /> - diff --git a/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/ViewerPanel.xaml.cs b/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/ViewerPanel.xaml.cs index 12093dc..43ef8a5 100644 --- a/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/ViewerPanel.xaml.cs +++ b/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/ViewerPanel.xaml.cs @@ -18,18 +18,18 @@ using System; using System.ComponentModel; using System.Diagnostics; -using System.Drawing; +using System.IO; +using System.Linq; using System.Runtime.CompilerServices; -using System.Threading.Tasks; using System.Windows; using System.Windows.Controls; using System.Windows.Input; using System.Windows.Media.Animation; 
-using Meta.Vlc; -using Meta.Vlc.Interop.Media; +using System.Windows.Media.Imaging; using QuickLook.Annotations; using QuickLook.ExtensionMethods; -using MediaState = Meta.Vlc.Interop.Media.MediaState; +using TagLib; +using File = TagLib.File; namespace QuickLook.Plugin.VideoViewer { @@ -40,16 +40,13 @@ namespace QuickLook.Plugin.VideoViewer { private readonly ContextObject _context; - private Uri _coverArt; - private bool _hasAudio; - private bool _hasEnded; - private bool _hasVideo; - private bool _isMuted; - private bool _isPlaying; + private BitmapSource _coverArt; private bool _wasPlaying; - public ViewerPanel(ContextObject context) + public ViewerPanel(ContextObject context, bool hasVideo) { + ShowVideo = hasVideo; + InitializeComponent(); // apply global theme @@ -58,8 +55,8 @@ namespace QuickLook.Plugin.VideoViewer ShowViedoControlContainer(null, null); viewerPanel.PreviewMouseMove += ShowViedoControlContainer; - mediaElement.PropertyChanged += PlayerPropertyChanged; - mediaElement.StateChanged += PlayerStateChanged; + //mediaElement.PropertyChanged += PlayerPropertyChanged; + //mediaElement.StateChanged += PlayerStateChanged; _context = context; @@ -70,15 +67,14 @@ namespace QuickLook.Plugin.VideoViewer sliderProgress.PreviewMouseDown += (sender, e) => { - _wasPlaying = mediaElement.VlcMediaPlayer.IsPlaying; + _wasPlaying = mediaElement.IsPlaying; mediaElement.Pause(); }; sliderProgress.PreviewMouseUp += (sender, e) => { if (_wasPlaying) mediaElement.Play(); }; - - mediaElement.VlcMediaPlayer.EncounteredError += ShowErrorNotification; + mediaElement.MediaFailed += ShowErrorNotification; /*mediaElement.MediaEnded += (s, e) => { if (mediaElement.IsOpen) @@ -89,90 +85,32 @@ namespace QuickLook.Plugin.VideoViewer } };*/ - PreviewMouseWheel += (sender, e) => ChangeVolume((double) e.Delta / 120 * 4); + PreviewMouseWheel += (sender, e) => ChangeVolume((double) e.Delta / 120 * 0.05); } - public bool IsMuted - { - get => _isMuted; - set - { - if (value == 
_isMuted) return; - _isMuted = value; - mediaElement.IsMute = value; - OnPropertyChanged(); - } - } + public bool ShowVideo { get; private set; } - public bool HasEnded - { - get => _hasEnded; - private set - { - if (value == _hasEnded) return; - _hasEnded = value; - OnPropertyChanged(); - } - } - - public bool HasAudio - { - get => _hasAudio; - private set - { - if (value == _hasAudio) return; - _hasAudio = value; - OnPropertyChanged(); - } - } - - public bool HasVideo - { - get => _hasVideo; - private set - { - if (value == _hasVideo) return; - _hasVideo = value; - OnPropertyChanged(); - } - } - - public bool IsPlaying - { - get => _isPlaying; - private set - { - if (value == _isPlaying) return; - _isPlaying = value; - OnPropertyChanged(); - } - } - - public Uri CoverArt + public BitmapSource CoverArt { get => _coverArt; private set { - if (value == _coverArt) return; + if (Equals(value, _coverArt)) return; if (value == null) return; _coverArt = value; OnPropertyChanged(); } } - public string LibVlcPath { get; } = VlcSettings.LibVlcPath; - - public string[] VlcOption { get; } = VlcSettings.VlcOptions; - public void Dispose() { + GC.SuppressFinalize(this); + try { - Task.Run(() => - { - mediaElement?.Dispose(); - mediaElement = null; - }); + CoverArt = null; + mediaElement?.Dispose(); + mediaElement = null; } catch (Exception e) { @@ -191,7 +129,7 @@ namespace QuickLook.Plugin.VideoViewer private void AutoHideViedoControlContainer(object sender, EventArgs e) { - if (!HasVideo) + if (ShowVideo) return; if (videoControlContainer.IsMouseOver) @@ -202,69 +140,26 @@ namespace QuickLook.Plugin.VideoViewer hide.Begin(); } - private void PlayerStop(object sender, MouseButtonEventArgs e) + private void UpdateMeta(string path) { - HasEnded = false; - IsPlaying = false; - mediaElement.Position = 0; - mediaElement.Stop(); - } - - private void PlayerPropertyChanged(object sender, PropertyChangedEventArgs e) - { - var prop = e.PropertyName; - - switch (prop) - { - case 
nameof(mediaElement.IsMute): - IsMuted = mediaElement.IsMute; - break; - } - } - - private void PlayerStateChanged(object sender, ObjectEventArgs e) - { - var state = e.Value; - - switch (state) - { - case MediaState.Opening: - HasVideo = mediaElement.VlcMediaPlayer.VideoTrackCount > 0; - HasAudio = mediaElement.VlcMediaPlayer.AudioTrackCount > 0; - break; - case MediaState.Playing: - UpdateMeta(); - DetermineTheme(); - HasVideo = mediaElement.VlcMediaPlayer.VideoTrackCount > 0; - HasAudio = mediaElement.VlcMediaPlayer.AudioTrackCount > 0; - IsPlaying = true; - break; - case MediaState.Paused: - IsPlaying = false; - break; - case MediaState.Ended: - IsPlaying = false; - HasEnded = true; - break; - case MediaState.Error: - ShowErrorNotification(sender, e); - break; - } - } - - private void UpdateMeta() - { - if (HasVideo) + if (ShowVideo) return; - var path = mediaElement.VlcMediaPlayer.Media.GetMeta(MetaDataType.ArtworkUrl); - if (!string.IsNullOrEmpty(path)) - CoverArt = new Uri(path); - - metaTitle.Text = mediaElement.VlcMediaPlayer.Media.GetMeta(MetaDataType.Title); - metaArtists.Text = mediaElement.VlcMediaPlayer.Media.GetMeta(MetaDataType.Artist); - metaAlbum.Text = mediaElement.VlcMediaPlayer.Media.GetMeta(MetaDataType.Album); + using (var h = File.Create(path)) + { + metaTitle.Text = h.Tag.Title; + metaArtists.Text = h.Tag.FirstPerformer; + metaAlbum.Text = h.Tag.Album; + //var cs = h.Tag.Pictures.FirstOrDefault(p => p.Type == TagLib.PictureType.FrontCover); + var cs = h.Tag.Pictures.FirstOrDefault(); + if (cs != default(IPicture)) + using (var ms = new MemoryStream(cs.Data.Data)) + { + CoverArt = BitmapFrame.Create(ms, BitmapCreateOptions.None, BitmapCacheOption.OnLoad); + DetermineTheme(); + } + } metaArtists.Visibility = string.IsNullOrEmpty(metaArtists.Text) ? 
Visibility.Collapsed : Visibility.Visible; @@ -275,16 +170,16 @@ namespace QuickLook.Plugin.VideoViewer private void DetermineTheme() { - if (HasVideo) + if (ShowVideo) return; if (CoverArt == null) return; - var dark = false; - using (var bitmap = new Bitmap(CoverArt.LocalPath)) + bool dark; + using (var b = CoverArt.ToBitmap()) { - dark = bitmap.IsDarkImage(); + dark = b.IsDarkImage(); } _context.UseDarkTheme = dark; @@ -292,34 +187,20 @@ namespace QuickLook.Plugin.VideoViewer private void ChangeVolume(double delta) { - IsMuted = false; + mediaElement.IsMuted = false; - var newVol = mediaElement.Volume + (int) delta; - newVol = Math.Max(newVol, 0); - newVol = Math.Min(newVol, 100); - - mediaElement.Volume = newVol; - } - - private void Seek(TimeSpan delta) - { - _wasPlaying = mediaElement.VlcMediaPlayer.IsPlaying; - mediaElement.Pause(); - - mediaElement.Time += delta; - - if (_wasPlaying) mediaElement.Play(); + mediaElement.Volume += delta; } private void TogglePlayPause(object sender, EventArgs e) { - if (mediaElement.VlcMediaPlayer.IsPlaying) + if (mediaElement.IsPlaying) { mediaElement.Pause(); } else { - if (HasEnded) + if (mediaElement.HasMediaEnded) mediaElement.Stop(); mediaElement.Play(); } @@ -339,15 +220,16 @@ namespace QuickLook.Plugin.VideoViewer public void LoadAndPlay(string path) { - mediaElement.LoadMedia(path); - mediaElement.Volume = 50; + UpdateMeta(path); + + mediaElement.Source = new Uri(path); + mediaElement.Volume = 0.5; mediaElement.Play(); } ~ViewerPanel() { - GC.SuppressFinalize(this); Dispose(); } diff --git a/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/VlcSettings.cs b/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/VlcSettings.cs deleted file mode 100644 index 4e35fe9..0000000 --- a/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/VlcSettings.cs +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright © 2017 Paddy Xu -// -// This file is part of QuickLook program. 
-// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -using System.IO; -using System.Reflection; - -namespace QuickLook.Plugin.VideoViewer -{ - public static class VlcSettings - { - public static string LibVlcPath = Path.Combine( - Path.GetDirectoryName(Assembly.GetExecutingAssembly().Location), - App.Is64Bit ? @"LibVLC\x64" : @"LibVLC\x86"); - - public static string[] VlcOptions = - { - "-I", "--dummy-quiet", "--ignore-config", "--no-video-title", "--no-sub-autodetect-file" - }; - } -} \ No newline at end of file diff --git a/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/ffmpeg/FFprobe.cs b/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/ffmpeg/FFprobe.cs new file mode 100644 index 0000000..e5f57a3 --- /dev/null +++ b/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/ffmpeg/FFprobe.cs @@ -0,0 +1,131 @@ +// Copyright © 2017 Paddy Xu +// +// This file is part of QuickLook program. +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +using System; +using System.Diagnostics; +using System.IO; +using System.Reflection; +using System.Text; +using System.Windows; +using System.Xml.XPath; + +namespace QuickLook.Plugin.VideoViewer.FFmpeg +{ + internal class FFprobe + { + private static readonly string _probePath = + Path.Combine(Path.GetDirectoryName(Assembly.GetExecutingAssembly().Location), "FFmpeg\\", + App.Is64Bit ? "x64\\" : "x86\\", "ffprobe.exe"); + + private XPathNavigator _infoNavigator; + + public FFprobe(string media) + { + Run(media); + } + + private bool Run(string media) + { + string result; + + using (var p = new Process()) + { + p.StartInfo.UseShellExecute = false; + p.StartInfo.CreateNoWindow = true; + p.StartInfo.RedirectStandardOutput = true; + p.StartInfo.FileName = _probePath; + p.StartInfo.Arguments = $"-v quiet -print_format xml -show_streams -show_format \"{media}\""; + p.StartInfo.StandardOutputEncoding = Encoding.UTF8; + p.Start(); + p.WaitForExit(); + + result = p.StandardOutput.ReadToEnd(); + } + + if (string.IsNullOrWhiteSpace(result)) + return false; + + ParseResult(result); + return true; + } + + private void ParseResult(string result) + { + _infoNavigator = new XPathDocument(new StringReader(result)).CreateNavigator(); + } + + public bool CanDecode() + { + var info = _infoNavigator.SelectSingleNode("/ffprobe/format[@probe_score>25]"); + + return info != null; + } + + public string GetFormatName() + { + var format = _infoNavigator.SelectSingleNode("/ffprobe/format/@format_name")?.Value; + + return format ?? string.Empty; + } + + public string GetFormatLongName() + { + var format = _infoNavigator.SelectSingleNode("/ffprobe/format/@format_long_name")?.Value; + + return format ?? 
string.Empty; + } + + public bool HasAudio() + { + var duration = _infoNavigator.SelectSingleNode("/ffprobe/streams/stream[@codec_type='audio'][1]/@duration") + ?.Value; + + if (duration == null) + return false; + + double.TryParse(duration, out var d); + return Math.Abs(d) > 0.01; + } + + public bool HasVideo() + { + var fps = _infoNavigator.SelectSingleNode("/ffprobe/streams/stream[@codec_type='video'][1]/@avg_frame_rate") + ?.Value; + + if (fps == null) + return false; + + return fps != "0/0"; + } + + public Size GetViewSize() + { + if (!HasVideo()) + return Size.Empty; + + var width = _infoNavigator.SelectSingleNode("/ffprobe/streams/stream[@codec_type='video'][1]/@coded_width") + ?.Value; + var height = _infoNavigator.SelectSingleNode("/ffprobe/streams/stream[@codec_type='video'][1]/@coded_height") + ?.Value; + + if (width == null || height == null) + return Size.Empty; + + return new Size(double.Parse(width), double.Parse(height)); + } + } +} \ No newline at end of file diff --git a/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/packages.config b/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/packages.config new file mode 100644 index 0000000..a91e250 --- /dev/null +++ b/QuickLook.Plugin/QuickLook.Plugin.VideoViewer/packages.config @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git a/QuickLook/ExtensionMethods/BitmapExtensions.cs b/QuickLook/ExtensionMethods/BitmapExtensions.cs index 492c4c2..7ba49e8 100644 --- a/QuickLook/ExtensionMethods/BitmapExtensions.cs +++ b/QuickLook/ExtensionMethods/BitmapExtensions.cs @@ -18,6 +18,7 @@ using System; using System.Drawing; using System.Drawing.Imaging; +using System.IO; using System.Threading.Tasks; using System.Windows.Media; using System.Windows.Media.Imaging; @@ -66,6 +67,19 @@ namespace QuickLook.ExtensionMethods return bs; } + public static Bitmap ToBitmap(this BitmapSource source) + { + using (var outStream = new MemoryStream()) + { + BitmapEncoder enc = new BmpBitmapEncoder(); + 
enc.Frames.Add(BitmapFrame.Create(source)); + enc.Save(outStream); + var bitmap = new Bitmap(outStream); + + return new Bitmap(bitmap); + } + } + private static PixelFormat ConvertPixelFormat( System.Drawing.Imaging.PixelFormat sourceFormat) { @@ -89,7 +103,7 @@ namespace QuickLook.ExtensionMethods image = image.Clone(new Rectangle(0, 0, image.Width, image.Height), System.Drawing.Imaging.PixelFormat.Format24bppRgb); - var sampleCount = (int) (0.2 * image.Width * image.Height); + var sampleCount = (int) (0.2 * 400 * 400); const int pixelSize = 24 / 8; var data = image.LockBits(new Rectangle(0, 0, image.Width, image.Height), ImageLockMode.ReadWrite, image.PixelFormat);